Dataset columns:

  column            type                     values / lengths
  repo_name         stringclasses            6 values
  pr_number         int64                    512 to 78.9k
  pr_title          stringlengths            3 to 144
  pr_description    stringlengths            0 to 30.3k
  author            stringlengths            2 to 21
  date_created      timestamp[ns, tz=UTC]
  date_merged       timestamp[ns, tz=UTC]
  previous_commit   stringlengths            40 to 40
  pr_commit         stringlengths            40 to 40
  query             stringlengths            17 to 30.4k
  filepath          stringlengths            9 to 210
  before_content    stringlengths            0 to 112M
  after_content     stringlengths            0 to 112M
  label             int64                    -1 to 1
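For reference, a minimal sketch of loading and inspecting rows with this schema via the Hugging Face `datasets` library. The dataset ID used below is a hypothetical placeholder, since the real Hub ID is not shown in this dump:

```python
# Minimal loading sketch. "org/pr-file-relevance" is a hypothetical
# placeholder ID; substitute the dataset's actual Hub ID.
from datasets import load_dataset

ds = load_dataset("org/pr-file-relevance", split="train")

row = ds[0]
# Each row pairs PR metadata with one file's before/after content and a label.
print(row["repo_name"], row["pr_number"], row["filepath"], row["label"])
# In the rows shown here, `query` appears to be the PR title and
# description concatenated.
print(row["query"][:80])
```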
repo_name: dotnet/runtime
pr_number: 66110
pr_title: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
pr_description: Fixes https://github.com/dotnet/runtime/issues/66099
author: mdh1418
date_created: 2022-03-02T21:21:59Z
date_merged: 2022-03-03T01:22:51Z
previous_commit: 48b6648e2f8ac01b24f26fc563d831f408e14795
pr_commit: 73471b51fb55198bc089f342cd75e077cc4762a8
query: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
filepath: ./src/tests/Interop/PInvoke/Variant/VariantTest.csproj
before_content:
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <!-- Test unsupported outside of windows -->
    <CLRTestTargetUnsupported Condition="'$(TargetsWindows)' != 'true'">true</CLRTestTargetUnsupported>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="PInvokeDefs.cs" />
    <Compile Include="VariantTest.cs" />
    <Compile Include="VariantTest.BuiltInCom.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="CMakeLists.txt" />
  </ItemGroup>
</Project>
after_content: identical to before_content (file unchanged by this PR)
label: -1
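Every example row shown in this dump carries label -1, and in each of them before_content matches after_content byte for byte, i.e. the PR left the file untouched. A hedged sketch of separating changed from unchanged files when consuming such rows; the `row` dict and helper name are illustrative, not part of the dataset:

```python
# Illustrative helper, assuming `row` is one dataset row (a dict) with
# the schema above. In the examples shown, label == -1 coincides with
# the file content being unchanged by the PR.
def file_changed(row: dict) -> bool:
    # True when the PR modified this file's content.
    return row["before_content"] != row["after_content"]
```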
repo_name: dotnet/runtime
pr_number: 66110
pr_title: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
pr_description: Fixes https://github.com/dotnet/runtime/issues/66099
author: mdh1418
date_created: 2022-03-02T21:21:59Z
date_merged: 2022-03-03T01:22:51Z
previous_commit: 48b6648e2f8ac01b24f26fc563d831f408e14795
pr_commit: 73471b51fb55198bc089f342cd75e077cc4762a8
query: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
filepath: ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftRightArithmeticAdd.Vector128.Int64.1.cs
before_content:
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightArithmeticAdd_Vector128_Int64_1() { var test = new ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 
|| (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector128<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1 testClass) { var result = AdvSimd.ShiftRightArithmeticAdd(_fld1, _fld2, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly byte Imm = 1; private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector128<Int64> _clsVar2; private Vector128<Int64> _fld1; private Vector128<Int64> _fld2; private DataTable _dataTable; static ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1() { for (var i = 0; i < 
Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightArithmeticAdd( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmeticAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmeticAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightArithmeticAdd( _clsVar1, _clsVar2, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector128<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector128((Int64*)(pClsVar2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.ShiftRightArithmeticAdd(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ShiftRightArithmeticAdd(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1(); var result = AdvSimd.ShiftRightArithmeticAdd(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmBinaryOpTest__ShiftRightArithmeticAdd_Vector128_Int64_1(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector128<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightArithmeticAdd(_fld1, _fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmeticAdd(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmeticAdd( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector128((Int64*)(&test._fld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> firstOp, Vector128<Int64> secondOp, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), firstOp); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), secondOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] firstOp, Int64[] secondOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightArithmeticAdd(firstOp[i], secondOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightArithmeticAdd)}<Int64>(Vector128<Int64>, Vector128<Int64>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
after_content: identical to before_content (file unchanged by this PR)
label: -1
repo_name: dotnet/runtime
pr_number: 66110
pr_title: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
pr_description: Fixes https://github.com/dotnet/runtime/issues/66099
author: mdh1418
date_created: 2022-03-02T21:21:59Z
date_merged: 2022-03-03T01:22:51Z
previous_commit: 48b6648e2f8ac01b24f26fc563d831f408e14795
pr_commit: 73471b51fb55198bc089f342cd75e077cc4762a8
query: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
filepath: ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b52746/b52746.cs
before_content:
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace Test
{
    using System;

    struct AA
    {
        static Array m_a;
        static bool[] m_ab;
        static object m_x;

        static int Main1()
        {
            if (m_ab[190])
            {
                object L = (object)(double[])m_x;
                int[] L3 = new int[0x7fffffff];
                try
                {
                    if (m_a == (String[])L)
                        return L3[0x7fffffff];
                }
                catch (Exception) { }
                bool b = (bool)L;
            }
            return 0;
        }

        static int Main()
        {
            try
            {
                return Main1();
            }
            catch (NullReferenceException)
            {
                return 100;
            }
        }
    }
}
after_content: identical to before_content (file unchanged by this PR)
label: -1
repo_name: dotnet/runtime
pr_number: 66110
pr_title: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
pr_description: Fixes https://github.com/dotnet/runtime/issues/66099
author: mdh1418
date_created: 2022-03-02T21:21:59Z
date_merged: 2022-03-03T01:22:51Z
previous_commit: 48b6648e2f8ac01b24f26fc563d831f408e14795
pr_commit: 73471b51fb55198bc089f342cd75e077cc4762a8
query: [tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
filepath: ./src/tests/JIT/HardwareIntrinsics/General/Vector256/GreaterThanOrEqualAny.UInt64.cs
before_content:
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void GreaterThanOrEqualAnyUInt64() { var test = new VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(UInt64[] inArray1, UInt64[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<UInt64> _fld1; public Vector256<UInt64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64 testClass) { var result = Vector256.GreaterThanOrEqualAny(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64); private static UInt64[] _data1 = new UInt64[Op1ElementCount]; private static UInt64[] _data2 = new UInt64[Op2ElementCount]; private static Vector256<UInt64> _clsVar1; private static Vector256<UInt64> _clsVar2; private Vector256<UInt64> _fld1; private Vector256<UInt64> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); } public VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector256.GreaterThanOrEqualAny( Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector256).GetMethod(nameof(Vector256.GreaterThanOrEqualAny), new Type[] { typeof(Vector256<UInt64>), typeof(Vector256<UInt64>) }); if (method is null) { method = typeof(Vector256).GetMethod(nameof(Vector256.GreaterThanOrEqualAny), 1, new Type[] { typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), 
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector256.GreaterThanOrEqualAny( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr); var result = Vector256.GreaterThanOrEqualAny(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64(); var result = Vector256.GreaterThanOrEqualAny(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector256.GreaterThanOrEqualAny(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector256.GreaterThanOrEqualAny(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<UInt64> op1, Vector256<UInt64> op2, bool result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(UInt64[] left, UInt64[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = false; for (var i = 0; i < Op1ElementCount; i++) { expectedResult |= (left[i] >= right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.GreaterThanOrEqualAny)}<UInt64>(Vector256<UInt64>, Vector256<UInt64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); 
TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void GreaterThanOrEqualAnyUInt64() { var test = new VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(UInt64[] inArray1, UInt64[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<UInt64> _fld1; public Vector256<UInt64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64 testClass) { var result = Vector256.GreaterThanOrEqualAny(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64); private static UInt64[] _data1 = new UInt64[Op1ElementCount]; private static UInt64[] _data2 = new UInt64[Op2ElementCount]; private static Vector256<UInt64> _clsVar1; private static Vector256<UInt64> _clsVar2; private Vector256<UInt64> _fld1; private Vector256<UInt64> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); } public VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector256.GreaterThanOrEqualAny( Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector256).GetMethod(nameof(Vector256.GreaterThanOrEqualAny), new Type[] { typeof(Vector256<UInt64>), typeof(Vector256<UInt64>) }); if (method is null) { method = typeof(Vector256).GetMethod(nameof(Vector256.GreaterThanOrEqualAny), 1, new Type[] { typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), 
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector256.GreaterThanOrEqualAny( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray2Ptr); var result = Vector256.GreaterThanOrEqualAny(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__GreaterThanOrEqualAnyUInt64(); var result = Vector256.GreaterThanOrEqualAny(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector256.GreaterThanOrEqualAny(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector256.GreaterThanOrEqualAny(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<UInt64> op1, Vector256<UInt64> op2, bool result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt64>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(UInt64[] left, UInt64[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = false; for (var i = 0; i < Op1ElementCount; i++) { expectedResult |= (left[i] >= right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.GreaterThanOrEqualAny)}<UInt64>(Vector256<UInt64>, Vector256<UInt64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); 
TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
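The generated test above validates Vector256.GreaterThanOrEqualAny by comparing the intrinsic's result against a scalar reduction over the lanes. As a minimal standalone sketch (my own illustration with arbitrary inputs, not part of the generated suite), the same semantics can be checked directly:

using System;
using System.Runtime.Intrinsics;

class GreaterThanOrEqualAnySketch
{
    static void Main()
    {
        // Four 64-bit lanes per Vector256<ulong>.
        Vector256<ulong> left  = Vector256.Create(1UL, 2UL, 3UL, 4UL);
        Vector256<ulong> right = Vector256.Create(9UL, 9UL, 3UL, 9UL);

        // Scalar reference, mirroring ValidateResult's expectedResult |= (left[i] >= right[i]).
        bool expected = false;
        for (int i = 0; i < Vector256<ulong>.Count; i++)
        {
            expected |= left.GetElement(i) >= right.GetElement(i);
        }

        bool actual = Vector256.GreaterThanOrEqualAny(left, right);

        // Both print True here: lane 2 satisfies 3 >= 3.
        Console.WriteLine($"expected={expected} actual={actual}");
    }
}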
./src/tests/baseservices/varargs/varargsupport.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="varargsupport.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="varargsupport.il" /> </ItemGroup> </Project>
./src/tests/JIT/HardwareIntrinsics/X86/Sse41/ConvertToVector128Int16.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ConvertToVector128Int16Byte() { var test = new SimpleUnaryOpTest__ConvertToVector128Int16Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates basic functionality works, using the pointer overload test.RunBasicScenario_Ptr(); if (Sse2.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates calling via reflection works, using the pointer overload test.RunReflectionScenario_Ptr(); if (Sse2.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local works test.RunLclFldScenario(); // Validates passing an instance member works test.RunFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__ConvertToVector128Int16Byte { private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16); private static Byte[] _data = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar; private Vector128<Byte> _fld; private SimpleUnaryOpTest__DataTable<Int16, Byte> _dataTable; static SimpleUnaryOpTest__ConvertToVector128Int16Byte() { var random = new Random(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)(random.Next(0, byte.MaxValue)); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); } public SimpleUnaryOpTest__ConvertToVector128Int16Byte() { Succeeded = true; var random = new Random(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)(random.Next(0, byte.MaxValue)); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)(random.Next(0, byte.MaxValue)); } _dataTable = new SimpleUnaryOpTest__DataTable<Int16, Byte>(_data, new Int16[RetElementCount], LargestVectorSize); } public bool IsSupported => Sse41.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { var result = Sse41.ConvertToVector128Int16( Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Ptr() { var result = Sse41.ConvertToVector128Int16( (Byte*)_dataTable.inArrayPtr ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { var result = Sse41.ConvertToVector128Int16( Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { var result = Sse41.ConvertToVector128Int16( Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { var result = typeof(Sse41).GetMethod(nameof(Sse41.ConvertToVector128Int16), new Type[] { typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Ptr() { var result = typeof(Sse41).GetMethod(nameof(Sse41.ConvertToVector128Int16), new Type[] { typeof(Byte*) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.inArrayPtr, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { var result = typeof(Sse41).GetMethod(nameof(Sse41.ConvertToVector128Int16), new Type[] { typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { var result = typeof(Sse41).GetMethod(nameof(Sse41.ConvertToVector128Int16), new Type[] { typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { var result = Sse41.ConvertToVector128Int16( _clsVar ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { var firstOp = Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr); var result = Sse41.ConvertToVector128Int16(firstOp); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { var firstOp = Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse41.ConvertToVector128Int16(firstOp); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { var firstOp = Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse41.ConvertToVector128Int16(firstOp); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclFldScenario() { var test = new SimpleUnaryOpTest__ConvertToVector128Int16Byte(); var result = Sse41.ConvertToVector128Int16(test._fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunFldScenario() { var result = Sse41.ConvertToVector128Int16(_fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunUnsupportedScenario() { Succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { Succeeded = true; } } private void ValidateResult(Vector128<Byte> firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Int16[] result, [CallerMemberName] string method = "") { if (result[0] != firstOp[0]) { Succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != firstOp[i]) { Succeeded = false; break; } } } if (!Succeeded) { Console.WriteLine($"{nameof(Sse41)}.{nameof(Sse41.ConvertToVector128Int16)}<Int16>(Vector128<Byte>): {method} failed:"); Console.WriteLine($" firstOp: ({string.Join(", ", firstOp)})"); Console.WriteLine($" result: ({string.Join(", ", result)})"); Console.WriteLine(); } } } }
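Sse41.ConvertToVector128Int16 over bytes performs a zero-extending widen (the pmovzxbw instruction): the low eight bytes of the source become eight 16-bit lanes, which is why ValidateResult can compare result[i] against firstOp[i] directly. A minimal sketch of that behavior (my own, with arbitrary inputs):

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class WidenSketch
{
    static void Main()
    {
        if (!Sse41.IsSupported)
        {
            Console.WriteLine("SSE4.1 is not available on this machine.");
            return;
        }

        // Only the low 8 bytes participate in the conversion.
        Vector128<byte> input = Vector128.Create(
            (byte)0, 1, 2, 3, 250, 251, 252, 253,
            0, 0, 0, 0, 0, 0, 0, 0);

        Vector128<short> widened = Sse41.ConvertToVector128Int16(input);

        // Zero extension preserves the unsigned value, so 250..253 stay positive.
        for (int i = 0; i < Vector128<short>.Count; i++)
        {
            Console.Write($"{widened.GetElement(i)} ");
        }
        Console.WriteLine();
    }
}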
./src/tests/JIT/Directed/shift/int16_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="int16.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="int16.cs" /> </ItemGroup> </Project>
./src/tests/baseservices/exceptions/unittests/CollidedUnwind.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="CollidedUnwind.cs" /> <Compile Include="trace.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="CollidedUnwind.cs" /> <Compile Include="trace.cs" /> </ItemGroup> </Project>
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltApiV2/QFE505_multith_customer_repro_with_or_expr.xsl
<?xml version="1.0" encoding="ISO-8859-1"?> <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:import href="layout.xsl" /> <xsl:import href="coupons.xsl" /> <xsl:template name="metadata"> <title>Categories</title> </xsl:template> <xsl:template name="extraheader"> <script type="text/javascript" language="javascript" src="../jscript/dates.js"> </script> </xsl:template> <xsl:template match="eventpage"> <div class="temp"> category.xsl </div> <xsl:apply-templates select="selectedcategory" /> </xsl:template> <xsl:template match="selectedcategory"> <h1><xsl:value-of select="@desc" /></h1> <xsl:apply-templates select="categorypath" /> <xsl:apply-templates select="categorygroupmarkets" /> <xsl:if test="/eventpage/selectedcategory[@typeID !=1 and @typeID !=2]"> <xsl:call-template name="timezonemessage" /> </xsl:if> <xsl:apply-templates select="categories" /> <xsl:apply-templates select="events" /> <xsl:if test="@typeID &gt; 2 and @typeID &lt; 6"> <xsl:call-template name="oddstypes"> <xsl:with-param name="params" select="concat('catid=',@ID)" /> </xsl:call-template> </xsl:if> </xsl:template> <xsl:template name="timezonemessage"> <div class="timezone"> GMT ( <script language="javascript" type="text/javascript">displaytimezone()</script> ) </div> </xsl:template> <xsl:template match="market" mode="eachway"> <xsl:text> </xsl:text> <xsl:apply-templates select="." mode="eachway_format"> <xsl:with-param name="caption">Each way</xsl:with-param> </xsl:apply-templates> </xsl:template> <!-- ================================================================== BREADCRUMB TRAIL =================================================================== --> <xsl:template match="categorypath"> <div class="breadcrumb"> <xsl:apply-templates select="/eventpage/sporttabs/sporttab[@ID=parent::sporttabs/@selected]" /> <xsl:apply-templates select="category" /> </div> </xsl:template> <xsl:template match="categorypath/category"> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={@ID}"> <xsl:value-of select="@desc" /><xsl:text /> </a> <xsl:text> / </xsl:text> </xsl:template> <xsl:template match="categorypath/category[@ID=/eventpage/selectedcategory/@ID]"> <xsl:value-of select="@desc" /> </xsl:template> <xsl:template match="sporttab"> <a href="CategoryPage.aspx?tab={@ID}"> <xsl:value-of select="@desc" /><xsl:text /> </a> <xsl:text> / </xsl:text> </xsl:template> <!-- ================================================================== MARKET SELECTOR AT TOP OF PAGE USED BY CATEGORY 3 =================================================================== --> <xsl:template match="categorygroupmarkets"> <xsl:if test="parent::selectedcategory/@typeID=3 and markettype[position()=2]"> <div class="markettypes"> <xsl:apply-templates select="markettype" /> </div> </xsl:if> </xsl:template> <xsl:template match="markettype"> <span> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={/eventpage/selectedcategory/@ID}&amp;mkttypeid={@ID}"> <xsl:value-of select="@desc" /> </a> </span> </xsl:template> <xsl:template match="markettype[@ID=parent::categorygroupmarkets/@selected]"> <span> <xsl:value-of select="@desc" /> </span> </xsl:template> <!-- ================================================================== CATEGORY 1 SUB-CATEGORIES LIST =================================================================== --> <xsl:template match="categories"> <xsl:apply-templates /> </xsl:template> <xsl:template match="categories/category"> <h2> <a 
href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={@ID}"> <xsl:value-of select="@desc" /> </a> </h2> <xsl:apply-templates select="@narrative[.!='']" /> </xsl:template> <xsl:template match="@narrative"> <div> <xsl:value-of select="." /> </div> </xsl:template> <!-- ================================================================== CATEGORY 2 LIST OF EVENTS IN CATEGORY =================================================================== --> <xsl:template match="selectedcategory[@typeID=2]/events" priority="2"> <ul> <xsl:apply-templates mode="category2" /> </ul> </xsl:template> <xsl:template match="event" mode="category2"> <li> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;evntid={@ID}"> <xsl:value-of select="@desc" /> </a> </li> </xsl:template> <!-- ================================================================== MAIN CATEGORY TEMPLATE REDIRECTION =================================================================== --> <!-- determine whether to branch off to list by markets or list by events --> <xsl:template match="selectedcategory/events"> <xsl:variable name="typeID" select="parent::selectedcategory/@typeID" /> <xsl:choose> <xsl:when test="$typeID = 3 or $typeID = 5"> <xsl:apply-templates select="parent::selectedcategory/categorygroupmarkets/markettype" mode="direct" /> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="event" mode="direct" /> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- list markets grouped by event - determine which style to use --> <xsl:template match="event" mode="direct"> <xsl:variable name="styleid" select="market[position()=1]/@styleID" /> <xsl:choose> <xsl:when test="$styleid = 1"> <xsl:apply-templates select="." mode="event1" /> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="." mode="eventdefault"> <xsl:with-param name="styleID" select="$styleid" /> </xsl:apply-templates> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- list events grouped by market - determine which style to use --> <xsl:template match="markettype" mode="direct"> <xsl:variable name="morebets"> <xsl:if test="@ID = parent::categorygroupmarkets/@selected and /eventpage/selectedcategory/@typeID = 3"> <xsl:text>1</xsl:text> </xsl:if> </xsl:variable> <xsl:variable name="styleid" select="parent::categorygroupmarkets/parent::selectedcategory/events/event/market[@typeID=current()/@ID]/@styleID" /> <xsl:choose> <xsl:when test="$styleid = 2 or $styleid = 3"> <xsl:apply-templates select="." mode="market2and3"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 4"> <xsl:apply-templates select="." mode="market4"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 5"> <xsl:apply-templates select="." mode="market5"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 6 or $styleid = 7"> <xsl:apply-templates select="." mode="market6and7"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="." 
mode="market1"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- ================================================================== DUMP - CATEGORY 7 =================================================================== --> <xsl:template match="selectedcategory[@typeID=7]/events"> <p> Category is no longer available. </p> </xsl:template> <!-- ================================================================== ASIAN HANDICAPS =================================================================== --> <xsl:template match="selectedcategory[@typeID=6]/events"> <p> Asian Note 1 </p> <p> Asian Note 2 </p> <xsl:apply-templates select="event" mode="asianhandicap" /> </xsl:template> <xsl:template match="event" mode="asianhandicap"> <table class="coupon"> <tr class="title"> <th colspan="3"> <xsl:value-of select="@desc" /> </th> </tr> <tr class="col"> <th> <xsl:apply-templates select="@date" mode="date" /> </th> <th> Asian Handicaps </th> <th> Asian Odds </th> </tr> <xsl:apply-templates select="market" mode="asianhandicap" /> </table> </xsl:template> <xsl:template match="market" mode="asianhandicap"> <xsl:apply-templates select="outcome" mode="asianhandicap" /> <xsl:apply-templates select="." mode="asiannarrative"> <xsl:with-param name="firstteam" select="outcome[@asianhandicapfactor='-1' or (@asianhandicapfactor='0' and position()=1)]" /> <xsl:with-param name="secondteam" select="outcome[@asianhandicapfactor='1' or (@asianhandicapfactor='0' and position()=2)]" /> <xsl:with-param name="swap"><xsl:if test="outcome[position()=1]/@asianhandicapfactor='1'">1</xsl:if></xsl:with-param> </xsl:apply-templates> </xsl:template> <xsl:template match="outcome" mode="asianhandicap"> <tr> <xsl:call-template name="cssrowclass" /> <td> <xsl:value-of select="@desc" /> </td> <td> <xsl:if test="@asianhandicapfactor='-1'"> <xsl:text>-</xsl:text> </xsl:if> <xsl:apply-templates select="parent::market/@asiangoal1" /><xsl:text /> <xsl:apply-templates select="parent::market/@asiangoal2" /><xsl:text /> </td> <td> <xsl:apply-templates select="." mode="odds" /> </td> </tr> </xsl:template> <xsl:template match="@asiangoal2"> <xsl:text>/</xsl:text><xsl:value-of select="." /><xsl:text /> </xsl:template> <xsl:template match="@asiandeductpercent"> <xsl:text>(</xsl:text><xsl:value-of select="." 
/><xsl:text>%)</xsl:text> </xsl:template> <!-- ================================================================== ASIAN HANDICAP NARRATIVES =================================================================== --> <xsl:template match="market[@asiangoal1='2.5' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> Multi-lingual </xsl:template> <xsl:template match="market[@asiangoal1='2' and @asiangoal2='2.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='2' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1.5' and @asiangoal2='2']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1.5' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1' and @asiangoal2='1.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0.5' and @asiangoal2='1']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0.5' and not(@asiangoal2)]" 
mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0' and @asiangoal2='0.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <!-- display an asian handicap description --> <xsl:template name="displayasiandesc"> <xsl:param name="firstdesc" /> <xsl:param name="seconddesc" /> <xsl:param name="swap" /> <tr> <td colspan="3"> <xsl:choose> <xsl:when test="$swap='1'"> <xsl:copy-of select="$seconddesc" /> <p> <xsl:copy-of select="$firstdesc" /> </p> </xsl:when> <xsl:otherwise> <xsl:copy-of select="$firstdesc" /> <p> <xsl:copy-of select="$seconddesc" /> </p> </xsl:otherwise> </xsl:choose> </td> </tr> </xsl:template> </xsl:stylesheet>
<?xml version="1.0" encoding="ISO-8859-1"?> <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:import href="layout.xsl" /> <xsl:import href="coupons.xsl" /> <xsl:template name="metadata"> <title>Categories</title> </xsl:template> <xsl:template name="extraheader"> <script type="text/javascript" language="javascript" src="../jscript/dates.js"> </script> </xsl:template> <xsl:template match="eventpage"> <div class="temp"> category.xsl </div> <xsl:apply-templates select="selectedcategory" /> </xsl:template> <xsl:template match="selectedcategory"> <h1><xsl:value-of select="@desc" /></h1> <xsl:apply-templates select="categorypath" /> <xsl:apply-templates select="categorygroupmarkets" /> <xsl:if test="/eventpage/selectedcategory[@typeID !=1 and @typeID !=2]"> <xsl:call-template name="timezonemessage" /> </xsl:if> <xsl:apply-templates select="categories" /> <xsl:apply-templates select="events" /> <xsl:if test="@typeID &gt; 2 and @typeID &lt; 6"> <xsl:call-template name="oddstypes"> <xsl:with-param name="params" select="concat('catid=',@ID)" /> </xsl:call-template> </xsl:if> </xsl:template> <xsl:template name="timezonemessage"> <div class="timezone"> GMT ( <script language="javascript" type="text/javascript">displaytimezone()</script> ) </div> </xsl:template> <xsl:template match="market" mode="eachway"> <xsl:text> </xsl:text> <xsl:apply-templates select="." mode="eachway_format"> <xsl:with-param name="caption">Each way</xsl:with-param> </xsl:apply-templates> </xsl:template> <!-- ================================================================== BREADCRUMB TRAIL =================================================================== --> <xsl:template match="categorypath"> <div class="breadcrumb"> <xsl:apply-templates select="/eventpage/sporttabs/sporttab[@ID=parent::sporttabs/@selected]" /> <xsl:apply-templates select="category" /> </div> </xsl:template> <xsl:template match="categorypath/category"> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={@ID}"> <xsl:value-of select="@desc" /><xsl:text /> </a> <xsl:text> / </xsl:text> </xsl:template> <xsl:template match="categorypath/category[@ID=/eventpage/selectedcategory/@ID]"> <xsl:value-of select="@desc" /> </xsl:template> <xsl:template match="sporttab"> <a href="CategoryPage.aspx?tab={@ID}"> <xsl:value-of select="@desc" /><xsl:text /> </a> <xsl:text> / </xsl:text> </xsl:template> <!-- ================================================================== MARKET SELECTOR AT TOP OF PAGE USED BY CATEGORY 3 =================================================================== --> <xsl:template match="categorygroupmarkets"> <xsl:if test="parent::selectedcategory/@typeID=3 and markettype[position()=2]"> <div class="markettypes"> <xsl:apply-templates select="markettype" /> </div> </xsl:if> </xsl:template> <xsl:template match="markettype"> <span> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={/eventpage/selectedcategory/@ID}&amp;mkttypeid={@ID}"> <xsl:value-of select="@desc" /> </a> </span> </xsl:template> <xsl:template match="markettype[@ID=parent::categorygroupmarkets/@selected]"> <span> <xsl:value-of select="@desc" /> </span> </xsl:template> <!-- ================================================================== CATEGORY 1 SUB-CATEGORIES LIST =================================================================== --> <xsl:template match="categories"> <xsl:apply-templates /> </xsl:template> <xsl:template match="categories/category"> <h2> <a 
href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;catid={@ID}"> <xsl:value-of select="@desc" /> </a> </h2> <xsl:apply-templates select="@narrative[.!='']" /> </xsl:template> <xsl:template match="@narrative"> <div> <xsl:value-of select="." /> </div> </xsl:template> <!-- ================================================================== CATEGORY 2 LIST OF EVENTS IN CATEGORY =================================================================== --> <xsl:template match="selectedcategory[@typeID=2]/events" priority="2"> <ul> <xsl:apply-templates mode="category2" /> </ul> </xsl:template> <xsl:template match="event" mode="category2"> <li> <a href="CategoryPage.aspx?tab={/eventpage/sporttabs/@selected}&amp;evntid={@ID}"> <xsl:value-of select="@desc" /> </a> </li> </xsl:template> <!-- ================================================================== MAIN CATEGORY TEMPLATE REDIRECTION =================================================================== --> <!-- determine whether to branch off to list by markets or list by events --> <xsl:template match="selectedcategory/events"> <xsl:variable name="typeID" select="parent::selectedcategory/@typeID" /> <xsl:choose> <xsl:when test="$typeID = 3 or $typeID = 5"> <xsl:apply-templates select="parent::selectedcategory/categorygroupmarkets/markettype" mode="direct" /> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="event" mode="direct" /> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- list markets grouped by event - determine which style to use --> <xsl:template match="event" mode="direct"> <xsl:variable name="styleid" select="market[position()=1]/@styleID" /> <xsl:choose> <xsl:when test="$styleid = 1"> <xsl:apply-templates select="." mode="event1" /> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="." mode="eventdefault"> <xsl:with-param name="styleID" select="$styleid" /> </xsl:apply-templates> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- list events grouped by market - determine which style to use --> <xsl:template match="markettype" mode="direct"> <xsl:variable name="morebets"> <xsl:if test="@ID = parent::categorygroupmarkets/@selected and /eventpage/selectedcategory/@typeID = 3"> <xsl:text>1</xsl:text> </xsl:if> </xsl:variable> <xsl:variable name="styleid" select="parent::categorygroupmarkets/parent::selectedcategory/events/event/market[@typeID=current()/@ID]/@styleID" /> <xsl:choose> <xsl:when test="$styleid = 2 or $styleid = 3"> <xsl:apply-templates select="." mode="market2and3"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 4"> <xsl:apply-templates select="." mode="market4"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 5"> <xsl:apply-templates select="." mode="market5"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:when test="$styleid = 6 or $styleid = 7"> <xsl:apply-templates select="." mode="market6and7"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="." 
mode="market1"> <xsl:with-param name="morebets" select="$morebets" /> </xsl:apply-templates> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- ================================================================== DUMP - CATEGORY 7 =================================================================== --> <xsl:template match="selectedcategory[@typeID=7]/events"> <p> Category is no longer available. </p> </xsl:template> <!-- ================================================================== ASIAN HANDICAPS =================================================================== --> <xsl:template match="selectedcategory[@typeID=6]/events"> <p> Asian Note 1 </p> <p> Asian Note 2 </p> <xsl:apply-templates select="event" mode="asianhandicap" /> </xsl:template> <xsl:template match="event" mode="asianhandicap"> <table class="coupon"> <tr class="title"> <th colspan="3"> <xsl:value-of select="@desc" /> </th> </tr> <tr class="col"> <th> <xsl:apply-templates select="@date" mode="date" /> </th> <th> Asian Handicaps </th> <th> Asian Odds </th> </tr> <xsl:apply-templates select="market" mode="asianhandicap" /> </table> </xsl:template> <xsl:template match="market" mode="asianhandicap"> <xsl:apply-templates select="outcome" mode="asianhandicap" /> <xsl:apply-templates select="." mode="asiannarrative"> <xsl:with-param name="firstteam" select="outcome[@asianhandicapfactor='-1' or (@asianhandicapfactor='0' and position()=1)]" /> <xsl:with-param name="secondteam" select="outcome[@asianhandicapfactor='1' or (@asianhandicapfactor='0' and position()=2)]" /> <xsl:with-param name="swap"><xsl:if test="outcome[position()=1]/@asianhandicapfactor='1'">1</xsl:if></xsl:with-param> </xsl:apply-templates> </xsl:template> <xsl:template match="outcome" mode="asianhandicap"> <tr> <xsl:call-template name="cssrowclass" /> <td> <xsl:value-of select="@desc" /> </td> <td> <xsl:if test="@asianhandicapfactor='-1'"> <xsl:text>-</xsl:text> </xsl:if> <xsl:apply-templates select="parent::market/@asiangoal1" /><xsl:text /> <xsl:apply-templates select="parent::market/@asiangoal2" /><xsl:text /> </td> <td> <xsl:apply-templates select="." mode="odds" /> </td> </tr> </xsl:template> <xsl:template match="@asiangoal2"> <xsl:text>/</xsl:text><xsl:value-of select="." /><xsl:text /> </xsl:template> <xsl:template match="@asiandeductpercent"> <xsl:text>(</xsl:text><xsl:value-of select="." 
/><xsl:text>%)</xsl:text> </xsl:template> <!-- ================================================================== ASIAN HANDICAP NARRATIVES =================================================================== --> <xsl:template match="market[@asiangoal1='2.5' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> Multi-lingual </xsl:template> <xsl:template match="market[@asiangoal1='2' and @asiangoal2='2.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='2' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1.5' and @asiangoal2='2']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1.5' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1' and @asiangoal2='1.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='1' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0.5' and @asiangoal2='1']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0.5' and not(@asiangoal2)]" 
mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0' and @asiangoal2='0.5']" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <xsl:template match="market[@asiangoal1='0' and not(@asiangoal2)]" mode="asiannarrative"> <xsl:param name="firstteam" /> <xsl:param name="secondteam" /> <xsl:param name="swap" /> <xsl:call-template name="displayasiandesc"> <xsl:with-param name="firstdesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="seconddesc"> Multi-lingual </xsl:with-param> <xsl:with-param name="swap" select="$swap" /> </xsl:call-template> </xsl:template> <!-- display an asian handicap description --> <xsl:template name="displayasiandesc"> <xsl:param name="firstdesc" /> <xsl:param name="seconddesc" /> <xsl:param name="swap" /> <tr> <td colspan="3"> <xsl:choose> <xsl:when test="$swap='1'"> <xsl:copy-of select="$seconddesc" /> <p> <xsl:copy-of select="$firstdesc" /> </p> </xsl:when> <xsl:otherwise> <xsl:copy-of select="$firstdesc" /> <p> <xsl:copy-of select="$seconddesc" /> </p> </xsl:otherwise> </xsl:choose> </td> </tr> </xsl:template> </xsl:stylesheet>
./src/libraries/System.ServiceModel.Syndication/tests/TestFeeds/brief-entry-noerror.xml
<!-- Description: No errors should be produced by a minimal entry Expect: !Error --> <entry xmlns="http://www.w3.org/2005/Atom"> <title type="text">Atom-Powered Robots Run Amok</title> <link href="http://contoso.com/2003/12/13/atom03"/> <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id> <updated>2003-12-13T18:30:02Z</updated> <summary type="text">Some text.</summary> <category term="MyCategory" /> <author> <name>John Doe</name> </author> </entry>
<!-- Description: No errors should be produced by a minimal entry Expect: !Error --> <entry xmlns="http://www.w3.org/2005/Atom"> <title type="text">Atom-Powered Robots Run Amok</title> <link href="http://contoso.com/2003/12/13/atom03"/> <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id> <updated>2003-12-13T18:30:02Z</updated> <summary type="text">Some text.</summary> <category term="MyCategory" /> <author> <name>John Doe</name> </author> </entry>
-1
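The record above pairs a minimal Atom entry with the expectation written in its comment: parsing must not raise an error. A rough sketch of consuming that file through System.ServiceModel.Syndication, assuming the stock Atom10ItemFormatter API; the relative path mirrors the record's filepath, everything else is illustrative:

```csharp
using System;
using System.ServiceModel.Syndication;
using System.Xml;

class BriefEntryNoError
{
    static void Main()
    {
        // Path mirrors the record's filepath; adjust to wherever the
        // test feeds are deployed in an actual run.
        using var reader = XmlReader.Create("TestFeeds/brief-entry-noerror.xml");

        // Atom10ItemFormatter reads a bare <entry> element.
        var formatter = new Atom10ItemFormatter();
        formatter.ReadFrom(reader);
        SyndicationItem item = formatter.Item;

        // "Expect: !Error" -- getting here without an exception is the
        // whole assertion; printing fields is just a sanity check.
        Console.WriteLine(item.Title.Text);      // Atom-Powered Robots Run Amok
        Console.WriteLine(item.LastUpdatedTime); // 2003-12-13 18:30:02 +00:00
    }
}
```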
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1055/Generated1055.il
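Before the raw IL: Generated1055.il is one of the auto-generated type-loader tests, and nearly every method body below repeats a single pattern, a `constrained. !!W callvirt` dispatch through a generic parameter `W`, recording which override each call resolves to. A hand-written C# analogue of that pattern (type and method names borrowed from the IL below; the snippet itself is not part of the test):

```csharp
using System;

interface IBase0
{
    string Method0();
}

class BaseClass0 : IBase0
{
    public string Method0() => "BaseClass0::Method0";
}

static class ConstrainedCallDemo
{
    // For this body the C# compiler emits the same instruction shape the
    // generated IL uses throughout: constrained. !!W callvirt Method0().
    // The constrained. prefix lets one sequence serve both value types
    // and reference types bound to W.
    static string Call<W>(W inst) where W : IBase0 => inst.Method0();

    static void Main() => Console.WriteLine(Call(new BaseClass0()));
}
```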
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1055 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1527`1<T0> extends class G2_C545`2<class BaseClass1,class BaseClass0> implements class IBase2`2<!T0,class BaseClass0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1527::Method7.16638<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<T0,class BaseClass0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<!T0,class BaseClass0>::Method7<[1]>() ldstr "G3_C1527::Method7.MI.16639<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod4451() cil managed noinlining { ldstr "G3_C1527::ClassMethod4451.16640()" ret } .method public hidebysig newslot virtual instance string ClassMethod4452<M0>() cil managed noinlining { ldstr "G3_C1527::ClassMethod4452.16641<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor() ret } } .class public G2_C545`2<T0, T1> extends class G1_C10`2<!T0,!T0> implements IBase0, class IBase2`2<!T0,!T0> { .method public hidebysig virtual instance string Method0() cil managed noinlining { ldstr "G2_C545::Method0.10008()" ret } .method public hidebysig newslot virtual instance string Method1() cil managed noinlining { ldstr "G2_C545::Method1.10009()" ret } .method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining { .override method instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ret } .method public hidebysig virtual instance string Method2<M0>() cil managed noinlining { ldstr "G2_C545::Method2.10011<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method 
public hidebysig newslot virtual instance string 'IBase0.Method2'<M0>() cil managed noinlining { .override method instance string IBase0::Method2<[1]>() ldstr "G2_C545::Method2.MI.10012<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method3<M0>() cil managed noinlining { ldstr "G2_C545::Method3.10013<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining { .override method instance string IBase0::Method3<[1]>() ldstr "G2_C545::Method3.MI.10014<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C545::Method7.10015<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod2459() cil managed noinlining { ldstr "G2_C545::ClassMethod2459.10016()" ret } .method public hidebysig newslot virtual instance string ClassMethod2460() cil managed noinlining { ldstr "G2_C545::ClassMethod2460.10017()" ret } .method public hidebysig newslot virtual instance string 'G1_C10<T0,T0>.ClassMethod1340'() cil managed noinlining { .override method instance string class G1_C10`2<!T0,!T0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ret } .method public hidebysig newslot virtual instance string 'G1_C10<T0,T0>.ClassMethod1342'<M0>() cil managed noinlining { .override method instance string class G1_C10`2<!T0,!T0>::ClassMethod1342<[1]>() ldstr "G2_C545::ClassMethod1342.MI.10019<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C10`2<!T0,!T0>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public G1_C10`2<T0, T1> implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase1`1<!T0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C10::Method7.4844<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string 
[mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method4() cil managed noinlining { ldstr "G1_C10::Method4.4845()" ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G1_C10::Method5.4847()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C10::Method6.4848<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G1_C10::Method6.MI.4849<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1340() cil managed noinlining { ldstr "G1_C10::ClassMethod1340.4850()" ret } .method public hidebysig newslot virtual instance string ClassMethod1341() cil managed noinlining { ldstr "G1_C10::ClassMethod1341.4851()" ret } .method public hidebysig newslot virtual instance string ClassMethod1342<M0>() cil managed noinlining { ldstr "G1_C10::ClassMethod1342.4852<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1343<M0>() cil managed noinlining { ldstr "G1_C10::ClassMethod1343.4853<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase0 { .method public hidebysig newslot abstract virtual instance string Method0() cil managed { } .method public hidebysig newslot abstract virtual instance string Method1() cil managed { } .method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { } .method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1055 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 
5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.T<T0,(class G3_C1527`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.T<T0,(class G3_C1527`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod4451() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod4452<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1527`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.A<(class G3_C1527`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.A<(class G3_C1527`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4451() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.T.T<T0,T1,(class G2_C545`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.T.T<T0,T1,(class G2_C545`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.A.T<T1,(class G2_C545`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.A.T<T1,(class G2_C545`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.A.A<(class G2_C545`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.A.A<(class G2_C545`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.A.B<(class G2_C545`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.A.B<(class G2_C545`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.B.T<T1,(class G2_C545`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.B.T<T1,(class G2_C545`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.B.A<(class G2_C545`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.B.A<(class G2_C545`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C545.B.B<(class G2_C545`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C545.B.B<(class G2_C545`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.T.T<T0,T1,(class G1_C10`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.T.T<T0,T1,(class G1_C10`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.A.T<T1,(class G1_C10`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.A.T<T1,(class G1_C10`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.A.A<(class G1_C10`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.A.A<(class G1_C10`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.A.B<(class G1_C10`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.A.B<(class G1_C10`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.B.T<T1,(class G1_C10`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.B.T<T1,(class G1_C10`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C10.B.A<(class G1_C10`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C10.B.A<(class G1_C10`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 3
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 4
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 5
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 6
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 7
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.G1_C10.B.B<(class G1_C10`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.B.B<(class G1_C10`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 1
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 3
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 4
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 5
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 6
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 7
    ldarga.s 0
    constrained. !!W
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
    .maxstack 9
    .locals init (string[] actualResults)
    ldc.i4.s 4
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
    ldc.i4.s 4
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string IBase0::Method0()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 1
    ldarga.s 0
    constrained. !!W
    callvirt instance string IBase0::Method1()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string IBase0::Method2<object>()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 3
    ldarga.s 0
    constrained. !!W
    callvirt instance string IBase0::Method3<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<!!T0>::Method4()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 1
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<!!T0>::Method5()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<!!T0>::Method6<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 1
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 1
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    stelem.ref
    ldloc.s actualResults
    ldc.i4.s 2
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method public hidebysig static void MethodCallingTest() cil managed {
    .maxstack 10
    .locals init (object V_0)
    ldstr "========================== Method Calling Test =========================="
    call void [mscorlib]System.Console::WriteLine(string)
    newobj instance void class G3_C1527`1<class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>()
    ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4451()
    ldstr "G3_C1527::ClassMethod4451.16640()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    newobj instance void class G3_C1527`1<class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>()
    ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451()
    ldstr "G3_C1527::ClassMethod4451.16640()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass1>
    callvirt instance string class G3_C1527`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class
BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C545`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type 
class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class 
BaseClass1>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>() ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance 
string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C10`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() ldstr 
"G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr 
"class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340() ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr 
"G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340() ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class 
G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class 
BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldstr "========================================================================\n\n"
    call void [mscorlib]System.Console::WriteLine(string)
    ret
  }

  // Generated test: news up each G3_C1527/G2_C545/G1_C10 instantiation and drives the
  // same virtual/interface slots through the Generated1055::M.* generic helpers, which
  // compare each resolved method against a '#'-separated list of expected
  // "Type::Method.Token()" strings.
  .method public hidebysig static void ConstrainedCallsTest() cil managed
  {
    .maxstack 10
    .locals init (object V_0)
    ldstr "========================== Constrained Calls Test =========================="
    call void [mscorlib]System.Console::WriteLine(string)
    newobj instance void class G3_C1527`1<class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G3_C1527`1<class
BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.B<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.B.A<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G3_C1527.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G3_C1527.A<class G3_C1527`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.A<class G3_C1527`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1527`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G3_C1527`1<class 
BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.B<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G2_C545.B.A<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G3_C1527.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#" call void Generated1055::M.G3_C1527.B<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.B.A<class G3_C1527`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1527::Method7.MI.16639<System.Object>()#" call void Generated1055::M.IBase2.A.A<class G3_C1527`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call 
void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr 
"G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.A.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class 
BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call 
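// G2_C545`2<class BaseClass1,class BaseClass0>: the same matrix; with BaseClass1 first, the IBase1`1 checks run against both the <BaseClass1> and <BaseClass0> views of the interface.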
void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.B.A<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C545`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.B<class G2_C545`2<class BaseClass1,class 
BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#" call void Generated1055::M.G2_C545.B.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#" call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.A<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void 
Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.A<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class 
BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1527`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class 
BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class 
G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class 
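// Most-derived view: the instance cast to G3_C1527`1<class BaseClass0> itself, where ClassMethod4451/ClassMethod4452 and Method7 come straight from G3_C1527.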
G3_C1527`1<class BaseClass0>::ClassMethod4451() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::ClassMethod4451.16640()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass 
class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1527`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class 
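// Second CalliTest receiver, G3_C1527`1<class BaseClass1>: the same ldvirtftn/calli checks repeated for the other type argument.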
BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class 
BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::ClassMethod4451.16640()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 
ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)

newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0

ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class 
BaseClass0,class BaseClass0>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class 
BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn 
instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)

newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0

ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class 
G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class 
BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>() calli 
default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class 
BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)

newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0

ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class 
BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class 
G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr 
"G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C545`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr 
"G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance 
string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn 
    instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
    ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
    ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
    ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
    ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    // ldvirtftn + calli checks on a fresh class G1_C10`2<class BaseClass0,class BaseClass0> instance
    newobj instance void class G1_C10`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    // ldvirtftn + calli checks on a fresh class G1_C10`2<class BaseClass0,class BaseClass1> instance
    newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    // ldvirtftn + calli checks on a fresh class G1_C10`2<class BaseClass1,class BaseClass0> instance
    newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    // ldvirtftn + calli checks on a fresh class G1_C10`2<class BaseClass1,class BaseClass1> instance
    newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>)
    ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    ldstr "========================================================================\n\n"
    call void [mscorlib]System.Console::WriteLine(string)
    ret
  }
  .method public hidebysig static int32 Main() cil managed {
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint
    .maxstack 10
    call void Generated1055::MethodCallingTest()
    call void Generated1055::ConstrainedCallsTest()
    call void Generated1055::StructConstrainedInterfaceCallsTest()
    call void Generated1055::CalliTest()
    ldc.i4 100
    ret
  }
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1055 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1527`1<T0> extends class G2_C545`2<class BaseClass1,class BaseClass0> implements class IBase2`2<!T0,class BaseClass0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1527::Method7.16638<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<T0,class BaseClass0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<!T0,class BaseClass0>::Method7<[1]>() ldstr "G3_C1527::Method7.MI.16639<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod4451() cil managed noinlining { ldstr "G3_C1527::ClassMethod4451.16640()" ret } .method public hidebysig newslot virtual instance string ClassMethod4452<M0>() cil managed noinlining { ldstr "G3_C1527::ClassMethod4452.16641<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor() ret } } .class public G2_C545`2<T0, T1> extends class G1_C10`2<!T0,!T0> implements IBase0, class IBase2`2<!T0,!T0> { .method public hidebysig virtual instance string Method0() cil managed noinlining { ldstr "G2_C545::Method0.10008()" ret } .method public hidebysig newslot virtual instance string Method1() cil managed noinlining { ldstr "G2_C545::Method1.10009()" ret } .method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining { .override method instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ret } .method public hidebysig virtual instance string Method2<M0>() cil managed noinlining { ldstr "G2_C545::Method2.10011<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method 
public hidebysig newslot virtual instance string 'IBase0.Method2'<M0>() cil managed noinlining { .override method instance string IBase0::Method2<[1]>() ldstr "G2_C545::Method2.MI.10012<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method3<M0>() cil managed noinlining { ldstr "G2_C545::Method3.10013<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining { .override method instance string IBase0::Method3<[1]>() ldstr "G2_C545::Method3.MI.10014<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C545::Method7.10015<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod2459() cil managed noinlining { ldstr "G2_C545::ClassMethod2459.10016()" ret } .method public hidebysig newslot virtual instance string ClassMethod2460() cil managed noinlining { ldstr "G2_C545::ClassMethod2460.10017()" ret } .method public hidebysig newslot virtual instance string 'G1_C10<T0,T0>.ClassMethod1340'() cil managed noinlining { .override method instance string class G1_C10`2<!T0,!T0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ret } .method public hidebysig newslot virtual instance string 'G1_C10<T0,T0>.ClassMethod1342'<M0>() cil managed noinlining { .override method instance string class G1_C10`2<!T0,!T0>::ClassMethod1342<[1]>() ldstr "G2_C545::ClassMethod1342.MI.10019<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C10`2<!T0,!T0>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public G1_C10`2<T0, T1> implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase1`1<!T0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C10::Method7.4844<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string 
[mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method4() cil managed noinlining { ldstr "G1_C10::Method4.4845()" ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G1_C10::Method5.4847()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C10::Method6.4848<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G1_C10::Method6.MI.4849<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1340() cil managed noinlining { ldstr "G1_C10::ClassMethod1340.4850()" ret } .method public hidebysig newslot virtual instance string ClassMethod1341() cil managed noinlining { ldstr "G1_C10::ClassMethod1341.4851()" ret } .method public hidebysig newslot virtual instance string ClassMethod1342<M0>() cil managed noinlining { ldstr "G1_C10::ClassMethod1342.4852<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1343<M0>() cil managed noinlining { ldstr "G1_C10::ClassMethod1343.4853<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase0 { .method public hidebysig newslot abstract virtual instance string Method0() cil managed { } .method public hidebysig newslot abstract virtual instance string Method1() cil managed { } .method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { } .method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1055 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 
5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.T<T0,(class G3_C1527`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.T<T0,(class G3_C1527`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod4451() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::ClassMethod4452<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1527`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.A<(class G3_C1527`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.A<(class G3_C1527`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4451() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 21 .locals init (string[] actualResults) ldc.i4.s 16 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 16 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
.method static void M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 21
    .locals init (string[] actualResults)
    ldc.i4.s 16
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G3_C1527.B<(class G3_C1527`1<class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 16
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 14 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 15 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1527`1<class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
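// Editorial note: the M.G2_C545.* helpers below apply the same harness shape to
// each instantiation of G2_C545`2 — fourteen results are collected through
// constrained. callvirt on the generic parameter W and passed, together with the
// caller-supplied expectation string, to TestFramework::MethodCallTest.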
.method static void M.G2_C545.T.T<T0,T1,(class G2_C545`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.T.T<T0,T1,(class G2_C545`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<!!T0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.A.T<T1,(class G2_C545`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.A.T<T1,(class G2_C545`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.A.A<(class G2_C545`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.A.A<(class G2_C545`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.A.B<(class G2_C545`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.A.B<(class G2_C545`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.B.T<T1,(class G2_C545`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.B.T<T1,(class G2_C545`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.B.A<(class G2_C545`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.B.A<(class G2_C545`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G2_C545.B.B<(class G2_C545`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 19
    .locals init (string[] actualResults)
    ldc.i4.s 14
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C545.B.B<(class G2_C545`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 14
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2459() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref
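// Editorial note: the M.IBase2.* helpers below validate the single IBase2`2
// interface slot (Method7<object>) for each combination of type arguments.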
    ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.T.T<T0,T1,(class G1_C10`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.T.T<T0,T1,(class G1_C10`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<!!T0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.A.T<T1,(class G1_C10`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.A.T<T1,(class G1_C10`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.A.A<(class G1_C10`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.A.A<(class G1_C10`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.A.B<(class G1_C10`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.A.B<(class G1_C10`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.B.T<T1,(class G1_C10`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.B.T<T1,(class G1_C10`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.B.A<(class G1_C10`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.B.A<(class G1_C10`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.G1_C10.B.B<(class G1_C10`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 13
    .locals init (string[] actualResults)
    ldc.i4.s 8
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G1_C10.B.B<(class G1_C10`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 8
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
    .maxstack 9
    .locals init (string[] actualResults)
    ldc.i4.s 4
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
    ldc.i4.s 4
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method0() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method1() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method2<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method3<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
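// Editorial note: the M.IBase1.* helpers below validate the three IBase1`1
// interface slots (Method4, Method5, Method6<object>) for each type argument.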
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
    .maxstack 8
    .locals init (string[] actualResults)
    ldc.i4.s 3
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 3
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
}
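// Editorial note: MethodCallingTest drives the scenario directly. It instantiates
// G3_C1527`1 over each base class and calls every inherited, overridden, and
// interface-implemented method through each static view of the object, comparing
// the returned identity string with the expected implementation.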
.method public hidebysig static void MethodCallingTest() cil managed {
    .maxstack 10
    .locals init (object V_0)
    ldstr "========================== Method Calling Test =========================="
    call void [mscorlib]System.Console::WriteLine(string)
    newobj instance void class G3_C1527`1<class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
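    // The same instance is now exercised through its interface views
    // (IBase2`2 and IBase1`1) and through each base-class view.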
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>()
    ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod4451()
    ldstr "G3_C1527::ClassMethod4451.16640()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.16638<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G3_C1527`1<class BaseClass0>
    callvirt instance string class G3_C1527`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop

    newobj instance void class G3_C1527`1<class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class
BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class 
BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void 
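// Through the G2_C545`2<class BaseClass1,class BaseClass0> base-class view of
// G3_C1527`1<class BaseClass1>: Method4/Method5/Method6 and ClassMethod1341/ClassMethod1343 still
// resolve to the G1_C10 bodies, ClassMethod1340/ClassMethod1342 resolve to the G2_C545 MethodImpl
// (.MI) bodies, and only Method7 picks up the G3_C1527 override.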
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>() ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451() ldstr "G3_C1527::ClassMethod4451.16640()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method7<object>() ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method2<object>() ldstr 
"G2_C545::Method2.10011<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1527`1<class BaseClass1> callvirt instance string class G3_C1527`1<class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance 
string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void 
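// For G2_C545`2<class BaseClass0,class BaseClass0> the two IBase2 instantiations diverge:
// IBase2`2<class BaseClass1,class BaseClass1> still resolves Method7 to G1_C10::Method7.4844,
// while IBase2`2<class BaseClass0,class BaseClass1> picks up the G2_C545::Method7.10015 override.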
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class 
G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class 
G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void 
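// The fresh G2_C545`2<class BaseClass0,class BaseClass1> instance is validated the same way:
// first through its G1_C10`2<class BaseClass0,class BaseClass0> base-class view, then through
// each interface view, and finally through its own exact type.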
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void 
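// Note the IBase1`1<class BaseClass0> expectations: Method4 and Method6 dispatch to the
// MethodImpl bodies (G1_C10::Method4.MI.4846, G1_C10::Method6.MI.4849), while Method5 uses the
// plain G1_C10::Method5.4847 implementation.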
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>() ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class 
G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
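// Across the four G2_C545 instantiations the base class is G1_C10`2<T0,T0>: the first G2_C545
// type argument fills both G1_C10 positions, so G2_C545`2<class BaseClass1,class BaseClass0>
// below is viewed as G1_C10`2<class BaseClass1,class BaseClass1>.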
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C10`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class 
BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class 
BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C545`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
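// IBase0 is non-generic, so its four methods dispatch identically on each G2_C545 instantiation
// checked here: Method0 hits the direct G2_C545 override, while Method1..Method3 hit the
// MethodImpl (.MI) bodies. The last instantiation, G2_C545`2<class BaseClass1,class BaseClass1>,
// is exercised next.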
        [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    // Calls on an instance of class G2_C545`2<class BaseClass1,class BaseClass1>
    newobj instance void class G2_C545`2<class BaseClass1,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460()
    ldstr "G2_C545::ClassMethod2460.10017()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2459()
    ldstr "G2_C545::ClassMethod2459.10016()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G2_C545::Method7.10015<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>()
    ldstr "G2_C545::Method3.10013<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>()
    ldstr "G2_C545::Method2.10011<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1()
    ldstr "G2_C545::Method1.10009()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G2_C545::ClassMethod1340.MI.10018()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G2_C545`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string IBase0::Method0()
    ldstr "G2_C545::Method0.10008()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method1()
    ldstr "G2_C545::Method1.MI.10010()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method2<object>()
    ldstr "G2_C545::Method2.MI.10012<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string IBase0::Method3<object>()
    ldstr "G2_C545::Method3.MI.10014<System.Object>()"
    ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    // Calls on an instance of class G1_C10`2<class BaseClass0,class BaseClass0>
    newobj instance void class G1_C10`2<class BaseClass0,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
    ldstr "G1_C10::ClassMethod1340.4850()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    // Calls on an instance of class G1_C10`2<class BaseClass0,class BaseClass1>
    newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340()
    ldstr "G1_C10::ClassMethod1340.4850()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass0,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    // Calls on an instance of class G1_C10`2<class BaseClass1,class BaseClass0>
    newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340()
    ldstr "G1_C10::ClassMethod1340.4850()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void
        [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass0>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    // Calls on an instance of class G1_C10`2<class BaseClass1,class BaseClass1>
    newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
    ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
    ldstr "G1_C10::ClassMethod1342.4852<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
    ldstr "G1_C10::ClassMethod1341.4851()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
    ldstr "G1_C10::ClassMethod1340.4850()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.4848<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.4845()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    castclass class G1_C10`2<class BaseClass1,class BaseClass1>
    callvirt instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
    ldstr "G1_C10::Method7.4844<System.Object>()"
    ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldloc.0
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G1_C10::Method4.MI.4846()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method5()
    ldstr "G1_C10::Method5.4847()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    dup
    callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
    ldstr "G1_C10::Method6.MI.4849<System.Object>()"
    ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>"
    call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
    pop
    ldstr "========================================================================\n\n"
    call void [mscorlib]System.Console::WriteLine(string)
    ret
}

.method public hidebysig static void ConstrainedCallsTest() cil managed
{
    .maxstack 10
    .locals init (object V_0)
    ldstr "========================== Constrained Calls Test =========================="
    call void [mscorlib]System.Console::WriteLine(string)
    // Each Generated1055::M.* helper receives the instance plus a '#'-separated list of expected results.
    // Constrained calls on class G3_C1527`1<class BaseClass0>
    newobj instance void class G3_C1527`1<class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.B<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.B<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.B.A<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G3_C1527.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G3_C1527.A<class G3_C1527`1<class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass0,class G3_C1527`1<class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.A<class G3_C1527`1<class BaseClass0>>(!!0,string)
    // Constrained calls on class G3_C1527`1<class BaseClass1>
    newobj instance void class G3_C1527`1<class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.B<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.B<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G2_C545.B.A<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G3_C1527.T<class BaseClass1,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G3_C1527::ClassMethod4451.16640()#G3_C1527::ClassMethod4452.16641<System.Object>()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G3_C1527::Method7.16638<System.Object>()#"
    call void Generated1055::M.G3_C1527.B<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.B.A<class G3_C1527`1<class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass0,class G3_C1527`1<class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G3_C1527::Method7.MI.16639<System.Object>()#"
    call void Generated1055::M.IBase2.A.A<class G3_C1527`1<class BaseClass1>>(!!0,string)
    // Constrained calls on class G2_C545`2<class BaseClass0,class BaseClass0>
    newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call
        void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.A<class G2_C545`2<class BaseClass0,class BaseClass0>>(!!0,string)
    // Constrained calls on class G2_C545`2<class BaseClass0,class BaseClass1>
    newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.A.T<class BaseClass1,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.A.B<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass0,class G2_C545`2<class BaseClass0,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.A<class G2_C545`2<class BaseClass0,class BaseClass1>>(!!0,string)
    // Constrained calls on class G2_C545`2<class BaseClass1,class BaseClass0>
    newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.B.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.B.A<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass1,class BaseClass0>>(!!0,string)
    // Constrained calls on class G2_C545`2<class BaseClass1,class BaseClass1>
    newobj instance void class G2_C545`2<class BaseClass1,class BaseClass1>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.B.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.IBase2.A.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.T.T<class BaseClass1,class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!2,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.B.T<class BaseClass1,class G2_C545`2<class BaseClass1,class BaseClass1>>(!!1,string)
    ldloc.0
    ldstr "G2_C545::ClassMethod1340.MI.10018()#G1_C10::ClassMethod1341.4851()#G2_C545::ClassMethod1342.MI.10019<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G2_C545::ClassMethod2459.10016()#G2_C545::ClassMethod2460.10017()#G2_C545::Method0.10008()#G2_C545::Method1.10009()#G2_C545::Method2.10011<System.Object>()#G2_C545::Method3.10013<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G2_C545::Method7.10015<System.Object>()#"
    call void Generated1055::M.G2_C545.B.B<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    ldloc.0
    ldstr "G2_C545::Method0.10008()#G2_C545::Method1.MI.10010()#G2_C545::Method2.MI.10012<System.Object>()#G2_C545::Method3.MI.10014<System.Object>()#"
    call void Generated1055::M.IBase0<class G2_C545`2<class BaseClass1,class BaseClass1>>(!!0,string)
    // Constrained calls on class G1_C10`2<class BaseClass0,class BaseClass0>
    newobj instance void class G1_C10`2<class BaseClass0,class BaseClass0>::.ctor()
    stloc.0
    ldloc.0
    ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.G1_C10.A.A<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
    call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!2,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass0>>(!!1,string)
    ldloc.0
    ldstr "G1_C10::Method7.4844<System.Object>()#"
    call void
Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.A.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.A<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#" call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#" call void Generated1055::M.G1_C10.B.T<class 
BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string)
  ldloc.0
  ldstr "G1_C10::ClassMethod1340.4850()#G1_C10::ClassMethod1341.4851()#G1_C10::ClassMethod1342.4852<System.Object>()#G1_C10::ClassMethod1343.4853<System.Object>()#G1_C10::Method4.4845()#G1_C10::Method5.4847()#G1_C10::Method6.4848<System.Object>()#G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.G1_C10.B.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.B.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.B.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string)
  ldloc.0
  ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
  call void Generated1055::M.IBase1.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string)
  ldloc.0
  ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
  call void Generated1055::M.IBase1.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!2,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.A.T<class BaseClass1,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string)
  ldloc.0
  ldstr "G1_C10::Method7.4844<System.Object>()#"
  call void Generated1055::M.IBase2.A.B<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string)
  ldloc.0
  ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
  call void Generated1055::M.IBase1.T<class BaseClass0,class G1_C10`2<class BaseClass1,class BaseClass1>>(!!1,string)
  ldloc.0
  ldstr "G1_C10::Method4.MI.4846()#G1_C10::Method5.4847()#G1_C10::Method6.MI.4849<System.Object>()#"
  call void Generated1055::M.IBase1.A<class G1_C10`2<class BaseClass1,class BaseClass1>>(!!0,string)
  ldstr "========================================================================\n\n"
  call void [mscorlib]System.Console::WriteLine(string)
  ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
  .maxstack 10
  ldstr "===================== Struct Constrained Interface Calls Test ====================="
  call void [mscorlib]System.Console::WriteLine(string)
  ldstr "========================================================================\n\n"
  call void [mscorlib]System.Console::WriteLine(string)
  ret
}
.method public hidebysig static void CalliTest() cil managed
{
  .maxstack 10
  .locals init (object V_0)
  ldstr "========================== Method Calli Test =========================="
  call void [mscorlib]System.Console::WriteLine(string)
  newobj instance void class G3_C1527`1<class BaseClass0>::.ctor()
  stloc.0
  ldloc.0
  castclass class G1_C10`2<class BaseClass1,class BaseClass1>
  ldloc.0
  ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
  calli default string(class G3_C1527`1<class BaseClass0>)
  ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
  ldstr "class G1_C10`2<class BaseClass1,class
BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class 
G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 
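// Every probe in CalliTest follows the same shape: the receiver is loaded once as
// the eventual calli argument (ldloc.0, with a castclass when the method is probed
// through a base-class view), loaded again so that ldvirtftn can capture the
// function pointer virtual dispatch would select for the object's runtime type,
// and the pointer is then invoked with calli. The string the method returns
// identifies the override that actually ran; TestFramework::MethodCallTest checks
// it against the expected value (first ldstr) for the scenario that the second
// ldstr describes.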
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod4452<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class 
G3_C1527`1<class BaseClass0>::ClassMethod4451() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::ClassMethod4451.16640()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass 
class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G3_C1527`1<class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass0>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1527`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class 
BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G2_C545`2<class 
BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.MI.10010()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.MI.10012<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.MI.10014<System.Object>()" ldstr "IBase0 on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod4452<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::ClassMethod4452.16641<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod4451() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::ClassMethod4451.16640()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 
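// Note on the expected strings: a plain value such as "G1_C10::Method5.4847()"
// means the slot still resolves to the original virtual definition, while a ".MI."
// component (as in "G2_C545::ClassMethod1340.MI.10018()") appears to mark a slot
// rewired through an explicit MethodImpl override rather than by-name matching;
// this reading is inferred from the generator's naming pattern, not stated in the
// file itself.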
ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.16638<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod2460() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod2459() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method3<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method2<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method1() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method1.10009()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method0() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::Method0.10008()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1343<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1342<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1341() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::ClassMethod1340() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G2_C545::ClassMethod1340.MI.10018()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method5() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1527`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1527`1<class BaseClass1>::Method4() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G3_C1527`1<class BaseClass1> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1527`1<class BaseClass1>) ldstr "G3_C1527::Method7.MI.16639<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1527`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C545`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class 
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method7.4844<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method7.4844<System.Object>()"
BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2460() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>" call void 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method2<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method2.10011<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method1()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method1.10009()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method0()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method0.10008()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method0()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method0.10008()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method1()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method1.MI.10010()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method2<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method2.MI.10012<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method3<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method3.MI.10014<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
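// Next: the same ldvirtftn/calli slot checks against a G2_C545`2<class BaseClass0,class BaseClass1> instance.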
newobj instance void class G2_C545`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1343<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method7.4844<System.Object>()"
ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method7.4844<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method4.MI.4846()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method5.4847()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method6.MI.4849<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2460()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod2460.10017()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod2459()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod2459.10016()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method3<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method3.10013<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method2<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method2.10011<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method1()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method1.10009()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method0()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method0.10008()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method5()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass0,class BaseClass1>::Method4()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G2_C545`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method0()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method0.10008()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method1()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method1.MI.10010()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method2<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method2.MI.10012<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method3<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method3.MI.10014<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C545`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
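// Next: the same slot checks against a G2_C545`2<class BaseClass1,class BaseClass0> instance,
// this time also resolving the IBase1`1<class BaseClass1> slots.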
newobj instance void class G2_C545`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C10`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method7.4844<System.Object>()"
ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method4.MI.4846()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method6.MI.4849<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method4.MI.4846()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method6.MI.4849<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2460()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
"G2_C545::ClassMethod2460.10017()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod2459() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::ClassMethod2459.10016()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method3.10013<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method2.10011<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method1.10009()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G2_C545::Method0.10008()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C545`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>" call void 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method0()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method0.10008()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method1()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method1.MI.10010()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method2<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method2.MI.10012<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method3<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C545::Method3.MI.10014<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
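// Next: the same slot checks against a G2_C545`2<class BaseClass1,class BaseClass1> instance.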
"G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G2_C545::Method7.10015<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance 
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::Method5.4847()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::Method6.MI.4849<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2460()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::ClassMethod2460.10017()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod2459()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::ClassMethod2459.10016()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method7.10015<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method3<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method3.10013<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method2<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method2.10011<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method1()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method1.10009()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method0()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method0.10008()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::ClassMethod1343.4853<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::ClassMethod1342.MI.10019<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1341()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::ClassMethod1341.4851()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::ClassMethod1340()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::ClassMethod1340.MI.10018()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::Method6.4848<System.Object>()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::Method5.4847()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C545`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C545`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C10::Method4.4845()"
ldstr "class G2_C545`2<class BaseClass1,class BaseClass1> on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method0()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method0.10008()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method1()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method1.MI.10010()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method2<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method2.MI.10012<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string IBase0::Method3<object>()
calli default string(class G2_C545`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C545::Method3.MI.10014<System.Object>()"
ldstr "IBase0 on type class G2_C545`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
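// Baseline: the base type G1_C10`2 instantiated directly; ClassMethod1340/ClassMethod1342 now resolve
// to the G1_C10 bodies (4850/4852) instead of the G2_C545 MethodImpl overrides exercised above.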
G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C10`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn 
instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass0,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class 
BaseClass0>::ClassMethod1342<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass0>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C10`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1343<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1343.4853<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1342<object>() calli default string(class G1_C10`2<class 
BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1342.4852<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1341() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1341.4851()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::ClassMethod1340() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::ClassMethod1340.4850()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.4848<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.4845()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C10`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C10`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class G1_C10`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method7.4844<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method4.MI.4846()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method5.4847()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G1_C10`2<class BaseClass1,class BaseClass1>) ldstr "G1_C10::Method6.MI.4849<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G1_C10`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated1055::MethodCallingTest() call void Generated1055::ConstrainedCallsTest() call void Generated1055::StructConstrainedInterfaceCallsTest() call void Generated1055::CalliTest() ldc.i4 100 ret } }
-1
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1446/Generated1446.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated1446.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated1446.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/coreclr/debug/ee/arm64/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/arm64/primitives.cpp" void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc) { CONTEXT tmp; CopyRegDisplay(pSrc, pDst, &tmp); } #ifdef FEATURE_EMULATE_SINGLESTEP void SetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->EnableSingleStep(); } void UnsetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->DisableSingleStep(); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); return pThread->IsSingleStepEnabled(); } #else // FEATURE_EMULATE_SINGLESTEP void SetSSFlag(DT_CONTEXT *pContext, Thread *) { SetSSFlag(pContext); } void UnsetSSFlag(DT_CONTEXT *pContext, Thread *) { UnsetSSFlag(pContext); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *pContext, Thread *) { return IsSSFlagEnabled(pContext); } #endif // FEATURE_EMULATE_SINGLESTEP
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/arm64/primitives.cpp" void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc) { CONTEXT tmp; CopyRegDisplay(pSrc, pDst, &tmp); } #ifdef FEATURE_EMULATE_SINGLESTEP void SetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->EnableSingleStep(); } void UnsetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->DisableSingleStep(); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); return pThread->IsSingleStepEnabled(); } #else // FEATURE_EMULATE_SINGLESTEP void SetSSFlag(DT_CONTEXT *pContext, Thread *) { SetSSFlag(pContext); } void UnsetSSFlag(DT_CONTEXT *pContext, Thread *) { UnsetSSFlag(pContext); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *pContext, Thread *) { return IsSSFlagEnabled(pContext); } #endif // FEATURE_EMULATE_SINGLESTEP
-1
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest367/Generated367.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated367.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated367.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/libraries/Microsoft.Win32.Registry/src/Microsoft/Win32/RegistryKeyPermissionCheck.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace Microsoft.Win32 { public enum RegistryKeyPermissionCheck { Default = 0, ReadSubTree = 1, ReadWriteSubTree = 2, } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace Microsoft.Win32 { public enum RegistryKeyPermissionCheck { Default = 0, ReadSubTree = 1, ReadWriteSubTree = 2, } }
-1
dotnet/runtime
66,110
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client
Fixes https://github.com/dotnet/runtime/issues/66099
mdh1418
2022-03-02T21:21:59Z
2022-03-03T01:22:51Z
48b6648e2f8ac01b24f26fc563d831f408e14795
73471b51fb55198bc089f342cd75e077cc4762a8
[tests] Update eventsvalidation tests to use intree Microsoft.Diagnostics.NETCore.Client. Fixes https://github.com/dotnet/runtime/issues/66099
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b52840/b52840.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/jit/lsra.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Linear Scan Register Allocation a.k.a. LSRA Preconditions - All register requirements are expressed in the code stream, either as destination registers of tree nodes, or as internal registers. These requirements are expressed in the RefPositions built for each node by BuildNode(), which includes: - The register uses and definitions. - The register restrictions (candidates) of the target register, both from itself, as producer of the value (dstCandidates), and from its consuming node (srcCandidates). Note that when we talk about srcCandidates we are referring to the destination register (not any of its sources). - The number (internalCount) of registers required, and their register restrictions (internalCandidates). These are neither inputs nor outputs of the node, but used in the sequence of code generated for the tree. "Internal registers" are registers used during the code sequence generated for the node. The register lifetimes must obey the following lifetime model: - First, any internal registers are defined. - Next, any source registers are used (and are then freed if they are last use and are not identified as "delayRegFree"). - Next, the internal registers are used (and are then freed). - Next, any registers in the kill set for the instruction are killed. - Next, the destination register(s) are defined (multiple destination registers are only supported on ARM) - Finally, any "delayRegFree" source registers are freed. There are several things to note about this order: - The internal registers will never overlap any use, but they may overlap a destination register. - Internal registers are never live beyond the node. - The "delayRegFree" annotation is used for instructions that are only available in a Read-Modify-Write form. That is, the destination register is one of the sources. In this case, we must not use the same register for the non-RMW operand as for the destination. Overview (doLinearScan): - Walk all blocks, building intervals and RefPositions (buildIntervals) - Allocate registers (allocateRegisters) - Annotate nodes with register assignments (resolveRegisters) - Add move nodes as needed to resolve conflicting register assignments across non-adjacent edges. (resolveEdges, called from resolveRegisters) Postconditions: Tree nodes (GenTree): - GenTree::GetRegNum() (and gtRegPair for ARM) is annotated with the register assignment for a node. If the node does not require a register, it is annotated as such (GetRegNum() = REG_NA). For a variable definition or interior tree node (an "implicit" definition), this is the register to put the result. For an expression use, this is the place to find the value that has previously been computed. - In most cases, this register must satisfy the constraints specified for the RefPosition. - In some cases, this is difficult: - If a lclVar node currently lives in some register, it may not be desirable to move it (i.e. its current location may be desirable for future uses, e.g. if it's a callee save register, but needs to be in a specific arg register for a call). - In other cases there may be conflicts on the restrictions placed by the defining node and the node which consumes it - If such a node is constrained to a single fixed register (e.g. 
an arg register, or a return from a call), then LSRA is free to annotate the node with a different register. The code generator must issue the appropriate move. - However, if such a node is constrained to a set of registers, and its current location does not satisfy that requirement, LSRA must insert a GT_COPY node between the node and its parent. The GetRegNum() on the GT_COPY node must satisfy the register requirement of the parent. - GenTree::gtRsvdRegs has a set of registers used for internal temps. - A tree node is marked GTF_SPILL if the tree node must be spilled by the code generator after it has been evaluated. - LSRA currently does not set GTF_SPILLED on such nodes, because it caused problems in the old code generator. In the new backend perhaps this should change (see also the note below under CodeGen). - A tree node is marked GTF_SPILLED if it is a lclVar that must be reloaded prior to use. - The register (GetRegNum()) on the node indicates the register to which it must be reloaded. - For lclVar nodes, since the uses and defs are distinct tree nodes, it is always possible to annotate the node with the register to which the variable must be reloaded. - For other nodes, since they represent both the def and use, if the value must be reloaded to a different register, LSRA must insert a GT_RELOAD node in order to specify the register to which it should be reloaded. Local variable table (LclVarDsc): - LclVarDsc::lvRegister is set to true if a local variable has the same register assignment for its entire lifetime. - LclVarDsc::lvRegNum / GetOtherReg(): these are initialized to their first value at the end of LSRA (it looks like GetOtherReg() isn't? This is probably a bug (ARM)). Codegen will set them to their current value as it processes the trees, since a variable can (now) be assigned different registers over its lifetimes. XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lsra.h" #ifdef DEBUG const char* LinearScan::resolveTypeName[] = {"Split", "Join", "Critical", "SharedCritical"}; #endif // DEBUG /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Small Helper functions XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ //-------------------------------------------------------------- // lsraAssignRegToTree: Assign the given reg to tree node. // // Arguments: // tree - Gentree node // reg - register to be assigned // regIdx - register idx, if tree is a multi-reg call node. // regIdx will be zero for single-reg result producing tree nodes. 
// // Return Value: // None // void lsraAssignRegToTree(GenTree* tree, regNumber reg, unsigned regIdx) { if (regIdx == 0) { tree->SetRegNum(reg); } #if !defined(TARGET_64BIT) else if (tree->OperIsMultiRegOp()) { assert(regIdx == 1); GenTreeMultiRegOp* mul = tree->AsMultiRegOp(); mul->gtOtherReg = reg; } #endif // TARGET_64BIT #if FEATURE_MULTIREG_RET else if (tree->OperGet() == GT_COPY) { assert(regIdx == 1); GenTreeCopyOrReload* copy = tree->AsCopyOrReload(); copy->gtOtherRegs[0] = (regNumberSmall)reg; } #endif // FEATURE_MULTIREG_RET #if FEATURE_ARG_SPLIT else if (tree->OperIsPutArgSplit()) { GenTreePutArgSplit* putArg = tree->AsPutArgSplit(); putArg->SetRegNumByIdx(reg, regIdx); } #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HW_INTRINSICS else if (tree->OperIs(GT_HWINTRINSIC)) { assert(regIdx == 1); // TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers. tree->AsHWIntrinsic()->SetOtherReg(reg); } #endif // FEATURE_HW_INTRINSICS else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR)) { tree->AsLclVar()->SetRegNumByIdx(reg, regIdx); } else { assert(tree->IsMultiRegCall()); GenTreeCall* call = tree->AsCall(); call->SetRegNumByIdx(reg, regIdx); } } //------------------------------------------------------------- // getWeight: Returns the weight of the RefPosition. // // Arguments: // refPos - ref position // // Returns: // Weight of ref position. weight_t LinearScan::getWeight(RefPosition* refPos) { weight_t weight; GenTree* treeNode = refPos->treeNode; if (treeNode != nullptr) { if (isCandidateLocalRef(treeNode)) { // Tracked locals: use weighted ref cnt as the weight of the // ref position. const LclVarDsc* varDsc = compiler->lvaGetDesc(treeNode->AsLclVarCommon()); weight = varDsc->lvRefCntWtd(); if (refPos->getInterval()->isSpilled) { // Decrease the weight if the interval has already been spilled. if (varDsc->lvLiveInOutOfHndlr || refPos->getInterval()->firstRefPosition->singleDefSpill) { // An EH-var/single-def is always spilled at defs, and we'll decrease the weight by half, // since only the reload is needed. weight = weight / 2; } else { weight -= BB_UNITY_WEIGHT; } } } else { // Non-candidate local ref or non-lcl tree node. // These are considered to have two references in the basic block: // a def and a use and hence weighted ref count would be 2 times // the basic block weight in which they appear. // However, it is generally more harmful to spill tree temps, so we // double that. const unsigned TREE_TEMP_REF_COUNT = 2; const unsigned TREE_TEMP_BOOST_FACTOR = 2; weight = TREE_TEMP_REF_COUNT * TREE_TEMP_BOOST_FACTOR * blockInfo[refPos->bbNum].weight; } } else { // Non-tree node ref positions. These will have a single // reference in the basic block and hence their weighted // refcount is equal to the block weight in which they // appear. weight = blockInfo[refPos->bbNum].weight; } return weight; } // allRegs represents a set of registers that can // be used to allocate the specified type in any point // in time (more of a 'bank' of registers). 
regMaskTP LinearScan::allRegs(RegisterType rt) { assert((rt != TYP_UNDEF) && (rt != TYP_STRUCT)); if (rt == TYP_FLOAT) { return availableFloatRegs; } else if (rt == TYP_DOUBLE) { return availableDoubleRegs; } #ifdef FEATURE_SIMD // TODO-Cleanup: Add an RBM_ALLSIMD else if (varTypeIsSIMD(rt)) { return availableDoubleRegs; } #endif // FEATURE_SIMD else { return availableIntRegs; } } regMaskTP LinearScan::allByteRegs() { #ifdef TARGET_X86 return availableIntRegs & RBM_BYTE_REGS; #else return availableIntRegs; #endif } regMaskTP LinearScan::allSIMDRegs() { return availableFloatRegs; } void LinearScan::updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition) { LsraLocation nextLocation; if (nextRefPosition == nullptr) { nextLocation = MaxLocation; fixedRegs &= ~genRegMask(regRecord->regNum); } else { nextLocation = nextRefPosition->nodeLocation; fixedRegs |= genRegMask(regRecord->regNum); } nextFixedRef[regRecord->regNum] = nextLocation; } regMaskTP LinearScan::getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition) { assert(currentInterval->isConstant && RefTypeIsDef(refPosition->refType)); regMaskTP candidates = (mask & m_RegistersWithConstants); regMaskTP result = RBM_NONE; while (candidates != RBM_NONE) { regMaskTP candidateBit = genFindLowestBit(candidates); candidates &= ~candidateBit; regNumber regNum = genRegNumFromMask(candidateBit); RegRecord* physRegRecord = getRegisterRecord(regNum); if (isMatchingConstant(physRegRecord, refPosition)) { result |= candidateBit; } } return result; } void LinearScan::clearNextIntervalRef(regNumber reg, var_types regType) { nextIntervalRef[reg] = MaxLocation; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regNumber otherReg = REG_NEXT(reg); nextIntervalRef[otherReg] = MaxLocation; } #endif } void LinearScan::clearSpillCost(regNumber reg, var_types regType) { spillCost[reg] = 0; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regNumber otherReg = REG_NEXT(reg); spillCost[otherReg] = 0; } #endif } void LinearScan::updateNextIntervalRef(regNumber reg, Interval* interval) { LsraLocation nextRefLocation = interval->getNextRefLocation(); nextIntervalRef[reg] = nextRefLocation; #ifdef TARGET_ARM if (interval->registerType == TYP_DOUBLE) { regNumber otherReg = REG_NEXT(reg); nextIntervalRef[otherReg] = nextRefLocation; } #endif } void LinearScan::updateSpillCost(regNumber reg, Interval* interval) { // An interval can have no recentRefPosition if this is the initial assignment // of a parameter to its home register. weight_t cost = (interval->recentRefPosition != nullptr) ? getWeight(interval->recentRefPosition) : 0; spillCost[reg] = cost; #ifdef TARGET_ARM if (interval->registerType == TYP_DOUBLE) { regNumber otherReg = REG_NEXT(reg); spillCost[otherReg] = cost; } #endif } //------------------------------------------------------------------------ // internalFloatRegCandidates: Return the set of registers that are appropriate // for use as internal float registers. // // Return Value: // The set of registers (as a regMaskTP). // // Notes: // compFloatingPointUsed is only required to be set if it is possible that we // will use floating point callee-save registers. // It is unlikely, if an internal register is the only use of floating point, // that it will select a callee-save register. But to be safe, we restrict // the set of candidates if compFloatingPointUsed is not already set. 
regMaskTP LinearScan::internalFloatRegCandidates() { if (compiler->compFloatingPointUsed) { return allRegs(TYP_FLOAT); } else { return RBM_FLT_CALLEE_TRASH; } } bool LinearScan::isFree(RegRecord* regRecord) { return ((regRecord->assignedInterval == nullptr || !regRecord->assignedInterval->isActive) && !isRegBusy(regRecord->regNum, regRecord->registerType)); } RegRecord* LinearScan::getRegisterRecord(regNumber regNum) { assert((unsigned)regNum < ArrLen(physRegs)); return &physRegs[regNum]; } #ifdef DEBUG //---------------------------------------------------------------------------- // getConstrainedRegMask: Returns new regMask which is the intersection of // regMaskActual and regMaskConstraint if the new regMask has at least // minRegCount registers, otherwise returns regMaskActual. // // Arguments: // regMaskActual - regMask that needs to be constrained // regMaskConstraint - regMask constraint that needs to be // applied to regMaskActual // minRegCount - Minimum number of regs that should be // present in new regMask. // // Return Value: // New regMask that has minRegCount registers after intersection. // Otherwise returns regMaskActual. regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstraint, unsigned minRegCount) { regMaskTP newMask = regMaskActual & regMaskConstraint; if (genCountBits(newMask) >= minRegCount) { return newMask; } return regMaskActual; } //------------------------------------------------------------------------ // stressLimitRegs: Given a set of registers, expressed as a register mask, reduce // them based on the current stress options. // // Arguments: // mask - The current mask of register candidates for a node // // Return Value: // A possibly-modified mask, based on the value of COMPlus_JitStressRegs. // // Notes: // This is the method used to implement the stress options that limit // the set of registers considered for allocation. regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask) { if (getStressLimitRegs() != LSRA_LIMIT_NONE) { // The refPosition could be null, for example when called // by getTempRegForResolution(). int minRegCount = (refPosition != nullptr) ?
refPosition->minRegCandidateCount : 1; switch (getStressLimitRegs()) { case LSRA_LIMIT_CALLEE: if (!compiler->opts.compDbgEnC) { mask = getConstrainedRegMask(mask, RBM_CALLEE_SAVED, minRegCount); } break; case LSRA_LIMIT_CALLER: { mask = getConstrainedRegMask(mask, RBM_CALLEE_TRASH, minRegCount); } break; case LSRA_LIMIT_SMALL_SET: if ((mask & LsraLimitSmallIntSet) != RBM_NONE) { mask = getConstrainedRegMask(mask, LsraLimitSmallIntSet, minRegCount); } else if ((mask & LsraLimitSmallFPSet) != RBM_NONE) { mask = getConstrainedRegMask(mask, LsraLimitSmallFPSet, minRegCount); } break; default: unreached(); } if (refPosition != nullptr && refPosition->isFixedRegRef) { mask |= refPosition->registerAssignment; } } return mask; } #endif // DEBUG //------------------------------------------------------------------------ // conflictingFixedRegReference: Determine whether the 'reg' has a // fixed register use that conflicts with 'refPosition' // // Arguments: // regNum - The register of interest // refPosition - The RefPosition of interest // // Return Value: // Returns true iff the given RefPosition is NOT a fixed use of this register, // AND either: // - there is a RefPosition on this RegRecord at the nodeLocation of the given RefPosition, or // - the given RefPosition has a delayRegFree, and there is a RefPosition on this RegRecord at // the nodeLocation just past the given RefPosition. // // Assumptions: // 'refPosition' is non-null. bool LinearScan::conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition) { // Is this a fixed reference of this register? If so, there is no conflict. if (refPosition->isFixedRefOfRegMask(genRegMask(regNum))) { return false; } // Otherwise, check for conflicts. // There is a conflict if: // 1. There is a recent RefPosition on this RegRecord that is at this location, OR // 2. There is an upcoming RefPosition at this location, or at the next location // if refPosition is a delayed use (i.e. must be kept live through the next/def location).
LsraLocation refLocation = refPosition->nodeLocation; RegRecord* regRecord = getRegisterRecord(regNum); if (isRegInUse(regNum, refPosition->getInterval()->registerType) && (regRecord->assignedInterval != refPosition->getInterval())) { return true; } LsraLocation nextPhysRefLocation = nextFixedRef[regNum]; if (nextPhysRefLocation == refLocation || (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1))) { return true; } return false; } /***************************************************************************** * Inline functions for Interval *****************************************************************************/ RefPosition* Referenceable::getNextRefPosition() { if (recentRefPosition == nullptr) { return firstRefPosition; } else { return recentRefPosition->nextRefPosition; } } LsraLocation Referenceable::getNextRefLocation() { RefPosition* nextRefPosition = getNextRefPosition(); if (nextRefPosition == nullptr) { return MaxLocation; } else { return nextRefPosition->nodeLocation; } } #ifdef DEBUG void LinearScan::dumpVarToRegMap(VarToRegMap map) { bool anyPrinted = false; for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { if (map[varIndex] != REG_STK) { printf("V%02u=%s ", compiler->lvaTrackedIndexToLclNum(varIndex), getRegName(map[varIndex])); anyPrinted = true; } } if (!anyPrinted) { printf("none"); } printf("\n"); } void LinearScan::dumpInVarToRegMap(BasicBlock* block) { printf("Var=Reg beg of " FMT_BB ": ", block->bbNum); VarToRegMap map = getInVarToRegMap(block->bbNum); dumpVarToRegMap(map); } void LinearScan::dumpOutVarToRegMap(BasicBlock* block) { printf("Var=Reg end of " FMT_BB ": ", block->bbNum); VarToRegMap map = getOutVarToRegMap(block->bbNum); dumpVarToRegMap(map); } #endif // DEBUG LinearScanInterface* getLinearScanAllocator(Compiler* comp) { return new (comp, CMK_LSRA) LinearScan(comp); } //------------------------------------------------------------------------ // LSRA constructor // // Arguments: // theCompiler // // Notes: // The constructor takes care of initializing the data structures that are used // during Lowering, including (in DEBUG) getting the stress environment variables, // as they may affect the block ordering. LinearScan::LinearScan(Compiler* theCompiler) : compiler(theCompiler) , intervals(theCompiler->getAllocator(CMK_LSRA_Interval)) , allocationPassComplete(false) , refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition)) , listNodePool(theCompiler) { regSelector = new (theCompiler, CMK_LSRA) RegisterSelection(this); firstColdLoc = MaxLocation; #ifdef DEBUG maxNodeLocation = 0; activeRefPosition = nullptr; // Get the value of the environment variable that controls stress for register allocation lsraStressMask = JitConfig.JitStressRegs(); #if 0 if (lsraStressMask != 0) { // The code in this #if can be used to debug JitStressRegs issues according to // method hash or method count. // To use, simply set environment variables: // JitStressRegsHashLo and JitStressRegsHashHi to set the range of method hash, or // JitStressRegsStart and JitStressRegsEnd to set the range of method count // (Compiler::jitTotalMethodCount as reported by COMPlus_DumpJittedMethods). 
unsigned methHash = compiler->info.compMethodHash(); char* lostr = getenv("JitStressRegsHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitStressRegsHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lsraStressMask = 0; } // Check method count unsigned count = Compiler::jitTotalMethodCompiled; unsigned start = 0; unsigned end = UINT32_MAX; char* startStr = getenv("JitStressRegsStart"); char* endStr = getenv("JitStressRegsEnd"); if (startStr != nullptr) { sscanf_s(startStr, "%d", &start); dump = true; } if (endStr != nullptr) { sscanf_s(endStr, "%d", &end); dump = true; } if (count < start || (count > end)) { lsraStressMask = 0; } if ((lsraStressMask != 0) && (dump == true)) { printf("JitStressRegs = %x for method %d: %s, hash = 0x%x.\n", lsraStressMask, Compiler::jitTotalMethodCompiled, compiler->info.compFullName, compiler->info.compMethodHash()); printf(""); // flush } } #endif // 0 #endif // DEBUG // Assume that we will enregister local variables if it's not disabled. We'll reset it if we // have no tracked locals when we start allocating. Note that new tracked lclVars may be added // after the first liveness analysis - either by optimizations or by Lowering, and the tracked // set won't be recomputed until after Lowering (and this constructor is called prior to Lowering), // so we don't want to check that yet. enregisterLocalVars = compiler->compEnregLocals(); #ifdef TARGET_ARM64 availableIntRegs = (RBM_ALLINT & ~(RBM_PR | RBM_FP | RBM_LR) & ~compiler->codeGen->regSet.rsMaskResvd); #else availableIntRegs = (RBM_ALLINT & ~compiler->codeGen->regSet.rsMaskResvd); #endif #if ETW_EBP_FRAMED availableIntRegs &= ~RBM_FPBASE; #endif // ETW_EBP_FRAMED availableFloatRegs = RBM_ALLFLOAT; availableDoubleRegs = RBM_ALLDOUBLE; #ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI. // RBP is not available to the register allocator, so RSI and RDI are the only // callee-save registers available. availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI; availableFloatRegs &= ~RBM_CALLEE_SAVED; availableDoubleRegs &= ~RBM_CALLEE_SAVED; } #endif // TARGET_AMD64 compiler->rpFrameType = FT_NOT_SET; compiler->rpMustCreateEBPCalled = false; compiler->codeGen->intRegState.rsIsFloat = false; compiler->codeGen->floatRegState.rsIsFloat = true; // Block sequencing (the order in which we schedule). // Note that we don't initialize the bbVisitedSet until we do the first traversal // This is so that any blocks that are added during the first traversal // are accounted for (and we don't have BasicBlockEpoch issues). blockSequencingDone = false; blockSequence = nullptr; blockSequenceWorkList = nullptr; curBBSeqNum = 0; bbSeqCount = 0; // Information about each block, including predecessor blocks used for variable locations at block entry. blockInfo = nullptr; pendingDelayFree = false; tgtPrefUse = nullptr; } //------------------------------------------------------------------------ // getNextCandidateFromWorkList: Get the next candidate for block sequencing // // Arguments: // None. // // Return Value: // The next block to be placed in the sequence. 
// // Notes: // This method currently always returns the next block in the list, and relies on having // blocks added to the list only when they are "ready", and on the // addToBlockSequenceWorkList() method to insert them in the proper order. // However, a block may be in the list and already selected, if it was subsequently // encountered as both a flow and layout successor of the most recently selected // block. BasicBlock* LinearScan::getNextCandidateFromWorkList() { BasicBlockList* nextWorkList = nullptr; for (BasicBlockList* workList = blockSequenceWorkList; workList != nullptr; workList = nextWorkList) { nextWorkList = workList->next; BasicBlock* candBlock = workList->block; removeFromBlockSequenceWorkList(workList, nullptr); if (!isBlockVisited(candBlock)) { return candBlock; } } return nullptr; } //------------------------------------------------------------------------ // setBlockSequence: Determine the block order for register allocation. // // Arguments: // None // // Return Value: // None // // Notes: // On return, the blockSequence array contains the blocks, in the order in which they // will be allocated. // This method clears the bbVisitedSet on LinearScan, and when it returns the set // contains all the bbNums for the block. void LinearScan::setBlockSequence() { assert(!blockSequencingDone); // The method should be called only once. compiler->EnsureBasicBlockEpoch(); #ifdef DEBUG blockEpoch = compiler->GetCurBasicBlockEpoch(); #endif // DEBUG // Initialize the "visited" blocks set. bbVisitedSet = BlockSetOps::MakeEmpty(compiler); BlockSet readySet(BlockSetOps::MakeEmpty(compiler)); BlockSet predSet(BlockSetOps::MakeEmpty(compiler)); assert(blockSequence == nullptr && bbSeqCount == 0); blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount]; bbNumMaxBeforeResolution = compiler->fgBBNumMax; blockInfo = new (compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1]; assert(blockSequenceWorkList == nullptr); verifiedAllBBs = false; hasCriticalEdges = false; BasicBlock* nextBlock; // We use a bbNum of 0 for entry RefPositions. // The other information in blockInfo[0] will never be used. blockInfo[0].weight = BB_UNITY_WEIGHT; #if TRACK_LSRA_STATS for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { blockInfo[0].stats[statIndex] = 0; } #endif // TRACK_LSRA_STATS JITDUMP("Start LSRA Block Sequence: \n"); for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock) { JITDUMP("Current block: " FMT_BB "\n", block->bbNum); blockSequence[bbSeqCount] = block; markBlockVisited(block); bbSeqCount++; nextBlock = nullptr; // Initialize the blockInfo. // predBBNum will be set later. // 0 is never used as a bbNum, but is used in blockInfo to designate an exception entry block. blockInfo[block->bbNum].predBBNum = 0; // We check for critical edges below, but initialize to false. blockInfo[block->bbNum].hasCriticalInEdge = false; blockInfo[block->bbNum].hasCriticalOutEdge = false; blockInfo[block->bbNum].weight = block->getBBWeight(compiler); blockInfo[block->bbNum].hasEHBoundaryIn = block->hasEHBoundaryIn(); blockInfo[block->bbNum].hasEHBoundaryOut = block->hasEHBoundaryOut(); blockInfo[block->bbNum].hasEHPred = false; #if TRACK_LSRA_STATS for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { blockInfo[block->bbNum].stats[statIndex] = 0; } #endif // TRACK_LSRA_STATS // We treat BBCallAlwaysPairTail blocks as having EH flow, since we can't // insert resolution moves into those blocks. 
if (block->isBBCallAlwaysPairTail()) { blockInfo[block->bbNum].hasEHBoundaryIn = true; blockInfo[block->bbNum].hasEHBoundaryOut = true; } bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr); for (BasicBlock* const predBlock : block->PredBlocks()) { if (!hasUniquePred) { if (predBlock->NumSucc(compiler) > 1) { blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } else if (predBlock->bbJumpKind == BBJ_SWITCH) { assert(!"Switch with single successor"); } } if (!block->isBBCallAlwaysPairTail() && (predBlock->hasEHBoundaryOut() || predBlock->isBBCallAlwaysPairTail())) { assert(!block->isBBCallAlwaysPairTail()); if (hasUniquePred) { // A unique pred with an EH out edge won't allow us to keep any variables enregistered. blockInfo[block->bbNum].hasEHBoundaryIn = true; } else { blockInfo[block->bbNum].hasEHPred = true; } } } // Determine which block to schedule next. // First, update the NORMAL successors of the current block, adding them to the worklist // according to the desired order. We will handle the EH successors below. const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH) { assert(!"Switch with single successor"); } for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++) { BasicBlock* succ = block->GetSucc(succIndex, compiler); if (checkForCriticalOutEdge && succ->GetUniquePred(compiler) == nullptr) { blockInfo[block->bbNum].hasCriticalOutEdge = true; hasCriticalEdges = true; // We can stop checking now. checkForCriticalOutEdge = false; } if (isTraversalLayoutOrder() || isBlockVisited(succ)) { continue; } // We've now seen a predecessor, so add it to the work list and the "readySet". // It will be inserted in the worklist according to the specified traversal order // (i.e. pred-first or random, since layout order is handled above). if (!BlockSetOps::IsMember(compiler, readySet, succ->bbNum)) { JITDUMP("\tSucc block: " FMT_BB, succ->bbNum); addToBlockSequenceWorkList(readySet, succ, predSet); BlockSetOps::AddElemD(compiler, readySet, succ->bbNum); } } // For layout order, simply use bbNext if (isTraversalLayoutOrder()) { nextBlock = block->bbNext; continue; } while (nextBlock == nullptr) { nextBlock = getNextCandidateFromWorkList(); // TODO-Throughput: We would like to bypass this traversal if we know we've handled all // the blocks - but fgBBcount does not appear to be updated when blocks are removed. if (nextBlock == nullptr /* && bbSeqCount != compiler->fgBBcount*/ && !verifiedAllBBs) { // If we don't encounter all blocks by traversing the regular successor links, do a full // traversal of all the blocks, and add them in layout order. // This may include: // - internal-only blocks which may not be in the flow graph // - blocks that have become unreachable due to optimizations, but that are strongly // connected (these are not removed) // - EH blocks for (BasicBlock* const seqBlock : compiler->Blocks()) { if (!isBlockVisited(seqBlock)) { JITDUMP("\tUnvisited block: " FMT_BB, seqBlock->bbNum); addToBlockSequenceWorkList(readySet, seqBlock, predSet); BlockSetOps::AddElemD(compiler, readySet, seqBlock->bbNum); } } verifiedAllBBs = true; } else { break; } } } blockSequencingDone = true; #ifdef DEBUG // Make sure that we've visited all the blocks. 
    for (BasicBlock* const block : compiler->Blocks())
    {
        assert(isBlockVisited(block));
    }

    JITDUMP("Final LSRA Block Sequence: \n");
    int i = 1;
    for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
    {
        JITDUMP(FMT_BB, block->bbNum);
        JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));

        if (blockInfo[block->bbNum].hasEHBoundaryIn)
        {
            JITDUMP(" EH-in");
        }
        if (blockInfo[block->bbNum].hasEHBoundaryOut)
        {
            JITDUMP(" EH-out");
        }
        if (blockInfo[block->bbNum].hasEHPred)
        {
            JITDUMP(" has EH pred");
        }
        JITDUMP("\n");
    }
    JITDUMP("\n");
#endif
}

//------------------------------------------------------------------------
// compareBlocksForSequencing: Compare two basic blocks for sequencing order.
//
// Arguments:
//    block1          - the first block for comparison
//    block2          - the second block for comparison
//    useBlockWeights - whether to use block weights for comparison
//
// Return Value:
//    -1 if block1 is preferred.
//     0 if the blocks are equivalent.
//     1 if block2 is preferred.
//
// Notes:
//    See addToBlockSequenceWorkList.
int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
{
    if (useBlockWeights)
    {
        weight_t weight1 = block1->getBBWeight(compiler);
        weight_t weight2 = block2->getBBWeight(compiler);

        if (weight1 > weight2)
        {
            return -1;
        }
        else if (weight1 < weight2)
        {
            return 1;
        }
    }

    // If the weights are the same, prefer the block with the LOWER bbNum.
    if (block1->bbNum < block2->bbNum)
    {
        return -1;
    }
    else if (block1->bbNum == block2->bbNum)
    {
        return 0;
    }
    else
    {
        return 1;
    }
}

//------------------------------------------------------------------------
// addToBlockSequenceWorkList: Add a BasicBlock to the work list for sequencing.
//
// Arguments:
//    sequencedBlockSet - the set of blocks that are already sequenced
//    block             - the new block to be added
//    predSet           - the buffer to save predecessors set. A block set allocated by the caller used here as a
//    temporary block set for constructing a predecessor set. Allocated by the caller to avoid reallocating a new block
//    set with every call to this function
//
// Return Value:
//    None.
//
// Notes:
//    The first block in the list will be the next one to be sequenced, as soon
//    as we encounter a block whose successors have all been sequenced, in pred-first
//    order, or the very next block if we are traversing in random order (once implemented).
//    This method uses a comparison method to determine the order in which to place
//    the blocks in the list. This method queries whether all predecessors of the
//    block are sequenced at the time it is added to the list and if so uses block weights
//    for inserting the block. A block is never inserted ahead of its predecessors.
//    A block at the time of insertion may not have all its predecessors sequenced, in
//    which case it will be sequenced based on its block number. Once a block is inserted,
//    its priority/order will not be changed later once its remaining predecessors are
//    sequenced. This means that the work list may not be sorted entirely based on
//    block weights alone.
//
//    Note also that, when random traversal order is implemented, this method
//    should insert the blocks into the list in random order, so that we can always
//    simply select the first block in the list.
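//
//    For illustration, here is a minimal standalone sketch of this insertion policy
//    (hypothetical 'Node' type and 'compare' callback; not the JIT's actual types):
//
//      struct Node { BasicBlock* block; Node* next; };
//
//      void insertSorted(Node*& head, BasicBlock* block, int (*compare)(BasicBlock*, BasicBlock*))
//      {
//          Node* prev = nullptr;
//          Node* cur  = head;
//          // Advance past every node that should precede 'block' (compare <= 0),
//          // mirroring the 'seqResult > 0' break condition in the method below.
//          while ((cur != nullptr) && (compare(cur->block, block) <= 0))
//          {
//              prev = cur;
//              cur  = cur->next;
//          }
//          Node* node = new Node{block, cur};
//          if (prev == nullptr) { head = node; } else { prev->next = node; }
//      }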
void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet) { // The block that is being added is not already sequenced assert(!BlockSetOps::IsMember(compiler, sequencedBlockSet, block->bbNum)); // Get predSet of block BlockSetOps::ClearD(compiler, predSet); for (BasicBlock* const predBlock : block->PredBlocks()) { BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum); } // If either a rarely run block or all its preds are already sequenced, use block's weight to sequence bool useBlockWeight = block->isRunRarely() || BlockSetOps::IsSubset(compiler, sequencedBlockSet, predSet); JITDUMP(", Criteria: %s", useBlockWeight ? "weight" : "bbNum"); BasicBlockList* prevNode = nullptr; BasicBlockList* nextNode = blockSequenceWorkList; while (nextNode != nullptr) { int seqResult; if (nextNode->block->isRunRarely()) { // If the block that is yet to be sequenced is a rarely run block, always use block weights for sequencing seqResult = compareBlocksForSequencing(nextNode->block, block, true); } else if (BlockSetOps::IsMember(compiler, predSet, nextNode->block->bbNum)) { // always prefer unsequenced pred blocks seqResult = -1; } else { seqResult = compareBlocksForSequencing(nextNode->block, block, useBlockWeight); } if (seqResult > 0) { break; } prevNode = nextNode; nextNode = nextNode->next; } BasicBlockList* newListNode = new (compiler, CMK_LSRA) BasicBlockList(block, nextNode); if (prevNode == nullptr) { blockSequenceWorkList = newListNode; } else { prevNode->next = newListNode; } #ifdef DEBUG nextNode = blockSequenceWorkList; JITDUMP(", Worklist: ["); while (nextNode != nullptr) { JITDUMP(FMT_BB " ", nextNode->block->bbNum); nextNode = nextNode->next; } JITDUMP("]\n"); #endif } void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode) { if (listNode == blockSequenceWorkList) { assert(prevNode == nullptr); blockSequenceWorkList = listNode->next; } else { assert(prevNode != nullptr && prevNode->next == listNode); prevNode->next = listNode->next; } // TODO-Cleanup: consider merging Compiler::BlockListNode and BasicBlockList // compiler->FreeBlockListNode(listNode); } // Initialize the block order for allocation (called each time a new traversal begins). BasicBlock* LinearScan::startBlockSequence() { if (!blockSequencingDone) { setBlockSequence(); } else { clearVisitedBlocks(); } BasicBlock* curBB = compiler->fgFirstBB; curBBSeqNum = 0; curBBNum = curBB->bbNum; assert(blockSequence[0] == compiler->fgFirstBB); markBlockVisited(curBB); return curBB; } //------------------------------------------------------------------------ // moveToNextBlock: Move to the next block in order for allocation or resolution. // // Arguments: // None // // Return Value: // The next block. // // Notes: // This method is used when the next block is actually going to be handled. // It changes curBBNum. BasicBlock* LinearScan::moveToNextBlock() { BasicBlock* nextBlock = getNextBlock(); curBBSeqNum++; if (nextBlock != nullptr) { curBBNum = nextBlock->bbNum; } return nextBlock; } //------------------------------------------------------------------------ // getNextBlock: Get the next block in order for allocation or resolution. // // Arguments: // None // // Return Value: // The next block. // // Notes: // This method does not actually change the current block - it is used simply // to determine which block will be next. 
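//    The typical traversal pattern, as used by the debug dump in setBlockSequence(), is:
//
//      for (BasicBlock* block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
//      {
//          // process 'block' ...
//      }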
BasicBlock* LinearScan::getNextBlock() { assert(blockSequencingDone); unsigned int nextBBSeqNum = curBBSeqNum + 1; if (nextBBSeqNum < bbSeqCount) { return blockSequence[nextBBSeqNum]; } return nullptr; } //------------------------------------------------------------------------ // doLinearScan: The main method for register allocation. // // Arguments: // None // // Return Value: // None. // void LinearScan::doLinearScan() { // Check to see whether we have any local variables to enregister. // We initialize this in the constructor based on opt settings, // but we don't want to spend time on the lclVar parts of LinearScan // if we have no tracked locals. if (enregisterLocalVars && (compiler->lvaTrackedCount == 0)) { enregisterLocalVars = false; } splitBBNumToTargetBBNumMap = nullptr; // This is complicated by the fact that physical registers have refs associated // with locations where they are killed (e.g. calls), but we don't want to // count these as being touched. compiler->codeGen->regSet.rsClearRegsModified(); initMaxSpill(); buildIntervals(); DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_REFPOS)); compiler->EndPhase(PHASE_LINEAR_SCAN_BUILD); DBEXEC(VERBOSE, lsraDumpIntervals("after buildIntervals")); initVarRegMaps(); allocateRegisters(); allocationPassComplete = true; compiler->EndPhase(PHASE_LINEAR_SCAN_ALLOC); resolveRegisters(); compiler->EndPhase(PHASE_LINEAR_SCAN_RESOLVE); assert(blockSequencingDone); // Should do at least one traversal. assert(blockEpoch == compiler->GetCurBasicBlockEpoch()); #if TRACK_LSRA_STATS if ((JitConfig.DisplayLsraStats() == 1) #ifdef DEBUG || VERBOSE #endif ) { dumpLsraStats(jitstdout); } #endif // TRACK_LSRA_STATS DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_POST)); #ifdef DEBUG compiler->fgDebugCheckLinks(); #endif compiler->compLSRADone = true; } //------------------------------------------------------------------------ // recordVarLocationsAtStartOfBB: Update live-in LclVarDscs with the appropriate // register location at the start of a block, during codegen. // // Arguments: // bb - the block for which code is about to be generated. // // Return Value: // None. // // Assumptions: // CodeGen will take care of updating the reg masks and the current var liveness, // after calling this method. // This is because we need to kill off the dead registers before setting the newly live ones. 
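// Example (hypothetical values):
//    If the inVarToRegMap for this block maps V01 -> REG_RSI and V02 -> REG_STK, then on return
//    V01's LclVarDsc reports RSI as its current home and V02 reports REG_STK, regardless of where
//    those variables lived at the end of the previously-generated block.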
void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb) { if (!enregisterLocalVars) { return; } JITDUMP("Recording Var Locations at start of " FMT_BB "\n", bb->bbNum); VarToRegMap map = getInVarToRegMap(bb->bbNum); unsigned count = 0; VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, bb->bbLiveIn)); VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex); LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); regNumber oldRegNum = varDsc->GetRegNum(); regNumber newRegNum = getVarReg(map, varIndex); if (oldRegNum != newRegNum) { JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum), compiler->compRegVarName(newRegNum)); varDsc->SetRegNum(newRegNum); count++; #ifdef USING_VARIABLE_LIVE_RANGE BasicBlock* prevReportedBlock = bb->bbPrev; if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail()) { // For callf+always pair we generate the code for the always // block in genCallFinally and skip it, so we don't report // anything for it (it has only trivial instructions, so that // does not matter much). So whether we need to rehome or not // depends on what we reported at the end of the callf block. prevReportedBlock = bb->bbPrev->bbPrev; } if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex)) { // varDsc was alive on previous block end so it has an open // "VariableLiveRange" which should change to be according to // "getInVarToRegMap" compiler->codeGen->getVariableLiveKeeper()->siUpdateVariableLiveRange(varDsc, varNum); } #endif // USING_VARIABLE_LIVE_RANGE } else if (newRegNum != REG_STK) { JITDUMP(" V%02u(%s)", varNum, compiler->compRegVarName(newRegNum)); count++; } } if (count == 0) { JITDUMP(" <none>\n"); } JITDUMP("\n"); } void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* linScan) { const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varDsc->lvTracked); assert(varDsc->lvVarIndex < compiler->lvaTrackedCount); linScan->localVarIntervals[varDsc->lvVarIndex] = this; assert(linScan->getIntervalForLocalVar(varDsc->lvVarIndex) == this); this->isLocalVar = true; this->varNum = lclNum; } //------------------------------------------------------------------------ // LinearScan:identifyCandidatesExceptionDataflow: Build the set of variables exposed on EH flow edges // // Notes: // This logic was originally cloned from fgInterBlockLocalVarLiveness. // void LinearScan::identifyCandidatesExceptionDataflow() { for (BasicBlock* const block : compiler->Blocks()) { if (block->hasEHBoundaryIn()) { // live on entry to handler VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn); } if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); if (block->bbJumpKind == BBJ_EHFINALLYRET) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, // we need to mark them must-init. VarSetOps::UnionD(compiler, finallyVars, block->bbLiveOut); } } } #ifdef DEBUG if (VERBOSE) { JITDUMP("EH Vars: "); INDEBUG(dumpConvertedVarSet(compiler, exceptVars)); JITDUMP("\nFinally Vars: "); INDEBUG(dumpConvertedVarSet(compiler, finallyVars)); JITDUMP("\n\n"); } // All variables live on exit from a 'finally' block should be marked lvLiveInOutOfHndlr. // and as 'explicitly initialized' (must-init) for GC-ref types. 
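    // (GC-ref locals live into a finally must be must-init because the finally can run during
    // exception unwind, before any assignment on the normal path has executed; reporting an
    // uninitialized GC ref to the GC would be unsafe.)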
VarSetOps::Iter iter(compiler, exceptVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex); LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); assert(varDsc->lvLiveInOutOfHndlr); if (varTypeIsGC(varDsc) && VarSetOps::IsMember(compiler, finallyVars, varIndex) && !varDsc->lvIsParam) { assert(varDsc->lvMustInit); } } #endif } bool LinearScan::isRegCandidate(LclVarDsc* varDsc) { if (!enregisterLocalVars) { return false; } assert(compiler->compEnregLocals()); if (!varDsc->lvTracked) { return false; } #if !defined(TARGET_64BIT) if (varDsc->lvType == TYP_LONG) { // Long variables should not be register candidates. // Lowering will have split any candidate lclVars into lo/hi vars. return false; } #endif // !defined(TARGET_64BIT) // If we have JMP, reg args must be put on the stack if (compiler->compJmpOpUsed && varDsc->lvIsRegArg) { return false; } // Don't allocate registers for dependently promoted struct fields if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { return false; } // Don't enregister if the ref count is zero. if (varDsc->lvRefCnt() == 0) { varDsc->setLvRefCntWtd(0); return false; } // Variables that are address-exposed are never enregistered, or tracked. // A struct may be promoted, and a struct that fits in a register may be fully enregistered. // Pinned variables may not be tracked (a condition of the GCInfo representation) // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning") // references when using the general GC encoding. unsigned lclNum = compiler->lvaGetLclNum(varDsc); if (varDsc->IsAddressExposed() || !varDsc->IsEnregisterableType() || (!compiler->compEnregStructLocals() && (varDsc->lvType == TYP_STRUCT))) { #ifdef DEBUG DoNotEnregisterReason dner; if (varDsc->IsAddressExposed()) { dner = DoNotEnregisterReason::AddrExposed; } else if (!varDsc->IsEnregisterableType()) { dner = DoNotEnregisterReason::NotRegSizeStruct; } else { dner = DoNotEnregisterReason::DontEnregStructs; } #endif // DEBUG compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(dner)); return false; } else if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif // JIT32_GCENCODER return false; } // Are we not optimizing and we have exception handlers? // if so mark all args and locals as volatile, so that they // won't ever get enregistered. // if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0) { compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } if (varDsc->lvDoNotEnregister) { return false; } switch (genActualType(varDsc->TypeGet())) { case TYP_FLOAT: case TYP_DOUBLE: return !compiler->opts.compDbgCode; case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: break; #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: return !varDsc->lvPromoted; #endif // FEATURE_SIMD case TYP_STRUCT: // TODO-1stClassStructs: support vars with GC pointers. The issue is that such // vars will have `lvMustInit` set, because emitter has poor support for struct liveness, // but if the variable is tracked the prolog generator would expect it to be in liveIn set, // so an assert in `genFnProlog` will fire. 
            return compiler->compEnregStructLocals() && !varDsc->HasGCPtr();

        case TYP_UNDEF:
        case TYP_UNKNOWN:
            noway_assert(!"lvType not set correctly");
            varDsc->lvType = TYP_INT;
            return false;

        default:
            return false;
    }

    return true;
}

// Identify locals & compiler temps that are register candidates
// TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order
// to avoid perturbation, but should be merged.
void LinearScan::identifyCandidates()
{
    if (enregisterLocalVars)
    {
        // Initialize the set of lclVars that are candidates for register allocation.
        VarSetOps::AssignNoCopy(compiler, registerCandidateVars, VarSetOps::MakeEmpty(compiler));

        // Initialize the sets of lclVars that are used to determine whether, and for which lclVars,
        // we need to perform resolution across basic blocks.
        // Note that we can't do this in the constructor because the number of tracked lclVars may
        // change between the constructor and the actual allocation.
        VarSetOps::AssignNoCopy(compiler, resolutionCandidateVars, VarSetOps::MakeEmpty(compiler));
        VarSetOps::AssignNoCopy(compiler, splitOrSpilledVars, VarSetOps::MakeEmpty(compiler));

        // We set enregisterLocalVars to true only if there are tracked lclVars
        assert(compiler->lvaCount != 0);
    }
    else if (compiler->lvaCount == 0)
    {
        // Nothing to do. Note that even if enregisterLocalVars is false, we still need to set the
        // lvLRACandidate field on all the lclVars to false if we have any.
        return;
    }

    VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
    VarSetOps::AssignNoCopy(compiler, finallyVars, VarSetOps::MakeEmpty(compiler));
    if (compiler->compHndBBtabCount > 0)
    {
        identifyCandidatesExceptionDataflow();
    }

    unsigned   lclNum;
    LclVarDsc* varDsc;

    // While we build intervals for the candidate lclVars, we will determine the floating point
    // lclVars, if any, to consider for callee-save register preferencing.
    // We maintain two sets of FP vars - those that meet the first threshold of weighted ref count,
    // and those that meet the second.
    // The first threshold is used for methods that are heuristically deemed either to have light
    // fp usage, or other factors that encourage conservative use of callee-save registers, such
    // as multiple exits (where there might be an early exit that would be excessively penalized by
    // lots of prolog/epilog saves & restores).
    // The second threshold is used where there are factors deemed to make it more likely that fp
    // callee save registers will be needed, such as loops or many fp vars.
    // We keep two sets of vars, since we collect some of the information to determine which set to
    // use as we iterate over the vars.
    // When we are generating AVX code on non-Unix (FEATURE_PARTIAL_SIMD_CALLEE_SAVE), we maintain an
    // additional set of LargeVectorType vars, and there is a separate threshold defined for those.
    // It is assumed that if we encounter these, we should consider this a "high use" scenario,
    // so we don't maintain two sets of these vars.
    // This is defined as thresholdLargeVectorRefCntWtd, as we are likely to use the same mechanism
    // for vectors on Arm64, though the actual value may differ.
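    // As a concrete illustration of the thresholds declared below (assuming BB_UNITY_WEIGHT == 100):
    // an fp lclVar needs a weighted ref count of at least 400 to be a first-threshold candidate,
    // and at least 200 to land in the "maybe" set, which is promoted to candidate status only when
    // the method looks fp-heavy (see the floatVarCount/fgHasLoops check later in this method).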
unsigned int floatVarCount = 0; weight_t thresholdFPRefCntWtd = 4 * BB_UNITY_WEIGHT; weight_t maybeFPRefCntWtd = 2 * BB_UNITY_WEIGHT; VARSET_TP fpMaybeCandidateVars(VarSetOps::UninitVal()); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE unsigned int largeVectorVarCount = 0; weight_t thresholdLargeVectorRefCntWtd = 4 * BB_UNITY_WEIGHT; #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (enregisterLocalVars) { VarSetOps::AssignNoCopy(compiler, fpCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler)); VarSetOps::AssignNoCopy(compiler, fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler)); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE VarSetOps::AssignNoCopy(compiler, largeVectorVars, VarSetOps::MakeEmpty(compiler)); VarSetOps::AssignNoCopy(compiler, largeVectorCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler)); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } #if DOUBLE_ALIGN unsigned refCntStk = 0; unsigned refCntReg = 0; weight_t refCntWtdReg = 0; unsigned refCntStkParam = 0; // sum of ref counts for all stack based parameters weight_t refCntWtdStkDbl = 0; // sum of wtd ref counts for stack based doubles doDoubleAlign = false; bool checkDoubleAlign = true; if (compiler->codeGen->isFramePointerRequired() || compiler->opts.MinOpts()) { checkDoubleAlign = false; } else { switch (compiler->getCanDoubleAlign()) { case MUST_DOUBLE_ALIGN: doDoubleAlign = true; checkDoubleAlign = false; break; case CAN_DOUBLE_ALIGN: break; case CANT_DOUBLE_ALIGN: doDoubleAlign = false; checkDoubleAlign = false; break; default: unreached(); } } #endif // DOUBLE_ALIGN // Check whether register variables are permitted. if (!enregisterLocalVars) { localVarIntervals = nullptr; } else if (compiler->lvaTrackedCount > 0) { // initialize mapping from tracked local to interval localVarIntervals = new (compiler, CMK_LSRA) Interval*[compiler->lvaTrackedCount]; } INTRACK_STATS(regCandidateVarCount = 0); for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++) { // Initialize all variables to REG_STK varDsc->SetRegNum(REG_STK); #ifndef TARGET_64BIT varDsc->SetOtherReg(REG_STK); #endif // TARGET_64BIT if (!enregisterLocalVars) { varDsc->lvLRACandidate = false; continue; } #if DOUBLE_ALIGN if (checkDoubleAlign) { if (varDsc->lvIsParam && !varDsc->lvIsRegArg) { refCntStkParam += varDsc->lvRefCnt(); } else if (!isRegCandidate(varDsc) || varDsc->lvDoNotEnregister) { refCntStk += varDsc->lvRefCnt(); if ((varDsc->lvType == TYP_DOUBLE) || ((varTypeIsStruct(varDsc) && varDsc->lvStructDoubleAlign && (compiler->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT)))) { refCntWtdStkDbl += varDsc->lvRefCntWtd(); } } else { refCntReg += varDsc->lvRefCnt(); refCntWtdReg += varDsc->lvRefCntWtd(); } } #endif // DOUBLE_ALIGN // Start with the assumption that it's a candidate. varDsc->lvLRACandidate = 1; // Start with lvRegister as false - set it true only if the variable gets // the same register assignment throughout varDsc->lvRegister = false; if (!isRegCandidate(varDsc)) { varDsc->lvLRACandidate = 0; if (varDsc->lvTracked) { localVarIntervals[varDsc->lvVarIndex] = nullptr; } // The current implementation of multi-reg structs that are referenced collectively // (i.e. by refering to the parent lclVar rather than each field separately) relies // on all or none of the fields being candidates. 
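            // (If some fields were enregistered and others were not, a multi-reg use or def of the
            // parent could not be described consistently, so we demote the whole struct below.)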
if (varDsc->lvIsStructField) { LclVarDsc* parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl); if (parentVarDsc->lvIsMultiRegRet && !parentVarDsc->lvDoNotEnregister) { JITDUMP("Setting multi-reg struct V%02u as not enregisterable:", varDsc->lvParentLcl); compiler->lvaSetVarDoNotEnregister(varDsc->lvParentLcl DEBUGARG(DoNotEnregisterReason::BlockOp)); for (unsigned int i = 0; i < parentVarDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(parentVarDsc->lvFieldLclStart + i); JITDUMP(" V%02u", parentVarDsc->lvFieldLclStart + i); if (fieldVarDsc->lvTracked) { fieldVarDsc->lvLRACandidate = 0; localVarIntervals[fieldVarDsc->lvVarIndex] = nullptr; VarSetOps::RemoveElemD(compiler, registerCandidateVars, fieldVarDsc->lvVarIndex); JITDUMP("*"); } // This is not accurate, but we need a non-zero refCnt for the parent so that it will // be allocated to the stack. parentVarDsc->setLvRefCnt(parentVarDsc->lvRefCnt() + fieldVarDsc->lvRefCnt()); } JITDUMP("\n"); } } continue; } if (varDsc->lvLRACandidate) { var_types type = varDsc->GetActualRegisterType(); if (varTypeUsesFloatReg(type)) { compiler->compFloatingPointUsed = true; } Interval* newInt = newInterval(type); newInt->setLocalNumber(compiler, lclNum, this); VarSetOps::AddElemD(compiler, registerCandidateVars, varDsc->lvVarIndex); // we will set this later when we have determined liveness varDsc->lvMustInit = false; if (varDsc->lvIsStructField) { newInt->isStructField = true; } if (varDsc->lvLiveInOutOfHndlr) { newInt->isWriteThru = varDsc->lvSingleDefRegCandidate; setIntervalAsSpilled(newInt); } INTRACK_STATS(regCandidateVarCount++); // We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count, // and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd // above). CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Additionally, when we are generating code for a target with partial SIMD callee-save // (AVX on non-UNIX amd64 and 16-byte vectors on arm64), we keep a separate set of the // LargeVectorType vars. if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())) { largeVectorVarCount++; VarSetOps::AddElemD(compiler, largeVectorVars, varDsc->lvVarIndex); weight_t refCntWtd = varDsc->lvRefCntWtd(); if (refCntWtd >= thresholdLargeVectorRefCntWtd) { VarSetOps::AddElemD(compiler, largeVectorCalleeSaveCandidateVars, varDsc->lvVarIndex); } } else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (regType(type) == FloatRegisterType) { floatVarCount++; weight_t refCntWtd = varDsc->lvRefCntWtd(); if (varDsc->lvIsRegArg) { // Don't count the initial reference for register params. In those cases, // using a callee-save causes an extra copy. refCntWtd -= BB_UNITY_WEIGHT; } if (refCntWtd >= thresholdFPRefCntWtd) { VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); } else if (refCntWtd >= maybeFPRefCntWtd) { VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); } } JITDUMP(" "); DBEXEC(VERBOSE, newInt->dump()); } else { localVarIntervals[varDsc->lvVarIndex] = nullptr; } } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Create Intervals to use for the save & restore of the upper halves of large vector lclVars. 
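    // (On these targets only the lower half of a large vector register is preserved across calls,
    // so each candidate gets an extra "upper vector" interval that models saving and restoring the
    // volatile upper half around each call site.)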
if (enregisterLocalVars) { VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars); unsigned largeVectorVarIndex = 0; while (largeVectorVarsIter.NextElem(&largeVectorVarIndex)) { makeUpperVectorInterval(largeVectorVarIndex); } } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if DOUBLE_ALIGN if (checkDoubleAlign) { // TODO-CQ: Fine-tune this: // In the legacy reg predictor, this runs after allocation, and then demotes any lclVars // allocated to the frame pointer, which is probably the wrong order. // However, because it runs after allocation, it can determine the impact of demoting // the lclVars allocated to the frame pointer. // => Here, estimate of the EBP refCnt and weighted refCnt is a wild guess. // unsigned refCntEBP = refCntReg / 8; weight_t refCntWtdEBP = refCntWtdReg / 8; doDoubleAlign = compiler->shouldDoubleAlign(refCntStk, refCntEBP, refCntWtdEBP, refCntStkParam, refCntWtdStkDbl); } #endif // DOUBLE_ALIGN // The factors we consider to determine which set of fp vars to use as candidates for callee save // registers current include the number of fp vars, whether there are loops, and whether there are // multiple exits. These have been selected somewhat empirically, but there is probably room for // more tuning. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (VERBOSE) { printf("\nFP callee save candidate vars: "); if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, fpCalleeSaveCandidateVars)) { dumpConvertedVarSet(compiler, fpCalleeSaveCandidateVars); printf("\n"); } else { printf("None\n\n"); } } #endif JITDUMP("floatVarCount = %d; hasLoops = %s, singleExit = %s\n", floatVarCount, dspBool(compiler->fgHasLoops), dspBool(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr)); // Determine whether to use the 2nd, more aggressive, threshold for fp callee saves. if (floatVarCount > 6 && compiler->fgHasLoops && (compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr)) { assert(enregisterLocalVars); #ifdef DEBUG if (VERBOSE) { printf("Adding additional fp callee save candidates: \n"); if (!VarSetOps::IsEmpty(compiler, fpMaybeCandidateVars)) { dumpConvertedVarSet(compiler, fpMaybeCandidateVars); printf("\n"); } else { printf("None\n\n"); } } #endif VarSetOps::UnionD(compiler, fpCalleeSaveCandidateVars, fpMaybeCandidateVars); } // From here on, we're only interested in the exceptVars that are candidates. if (enregisterLocalVars && (compiler->compHndBBtabCount > 0)) { VarSetOps::IntersectionD(compiler, exceptVars, registerCandidateVars); } #ifdef TARGET_ARM #ifdef DEBUG if (VERBOSE) { // Frame layout is only pre-computed for ARM printf("\nlvaTable after IdentifyCandidates\n"); compiler->lvaTableDump(Compiler::FrameLayoutState::PRE_REGALLOC_FRAME_LAYOUT); } #endif // DEBUG #endif // TARGET_ARM } // TODO-Throughput: This mapping can surely be more efficiently done void LinearScan::initVarRegMaps() { if (!enregisterLocalVars) { inVarToRegMaps = nullptr; outVarToRegMaps = nullptr; return; } assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked // variables. // The compiler memory allocator requires that the allocation be an // even multiple of int-sized objects unsigned int varCount = compiler->lvaTrackedCount; regMapCount = roundUp(varCount, (unsigned)sizeof(int)); // Not sure why blocks aren't numbered from zero, but they don't appear to be. // So, if we want to index by bbNum we have to know the maximum value. 
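    // Layout sketch (hypothetical sizes): with 3 tracked vars, regMapCount rounds up to 4, and each
    // block gets two 4-entry regNumberSmall arrays, all initialized to REG_STK:
    //    inVarToRegMaps[bbNum]  : { REG_STK, REG_STK, REG_STK, REG_STK }
    //    outVarToRegMaps[bbNum] : { REG_STK, REG_STK, REG_STK, REG_STK }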
unsigned int bbCount = compiler->fgBBNumMax + 1; inVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount]; outVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount]; if (varCount > 0) { // This VarToRegMap is used during the resolution of critical edges. sharedCriticalVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; for (unsigned int i = 0; i < bbCount; i++) { VarToRegMap inVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; VarToRegMap outVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; for (unsigned int j = 0; j < regMapCount; j++) { inVarToRegMap[j] = REG_STK; outVarToRegMap[j] = REG_STK; } inVarToRegMaps[i] = inVarToRegMap; outVarToRegMaps[i] = outVarToRegMap; } } else { sharedCriticalVarToRegMap = nullptr; for (unsigned int i = 0; i < bbCount; i++) { inVarToRegMaps[i] = nullptr; outVarToRegMaps[i] = nullptr; } } } void LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg) { assert(enregisterLocalVars); assert(reg < UCHAR_MAX && varNum < compiler->lvaCount); inVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg; } void LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg) { assert(enregisterLocalVars); assert(reg < UCHAR_MAX && varNum < compiler->lvaCount); outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg; } LinearScan::SplitEdgeInfo LinearScan::getSplitEdgeInfo(unsigned int bbNum) { assert(enregisterLocalVars); SplitEdgeInfo splitEdgeInfo; assert(bbNum <= compiler->fgBBNumMax); assert(bbNum > bbNumMaxBeforeResolution); assert(splitBBNumToTargetBBNumMap != nullptr); splitBBNumToTargetBBNumMap->Lookup(bbNum, &splitEdgeInfo); assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution); assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution); return splitEdgeInfo; } VarToRegMap LinearScan::getInVarToRegMap(unsigned int bbNum) { assert(enregisterLocalVars); assert(bbNum <= compiler->fgBBNumMax); // For the blocks inserted to split critical edges, the inVarToRegMap is // equal to the outVarToRegMap at the "from" block. if (bbNum > bbNumMaxBeforeResolution) { SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum); unsigned fromBBNum = splitEdgeInfo.fromBBNum; if (fromBBNum == 0) { assert(splitEdgeInfo.toBBNum != 0); return inVarToRegMaps[splitEdgeInfo.toBBNum]; } else { return outVarToRegMaps[fromBBNum]; } } return inVarToRegMaps[bbNum]; } VarToRegMap LinearScan::getOutVarToRegMap(unsigned int bbNum) { assert(enregisterLocalVars); assert(bbNum <= compiler->fgBBNumMax); if (bbNum == 0) { return nullptr; } // For the blocks inserted to split critical edges, the outVarToRegMap is // equal to the inVarToRegMap at the target. if (bbNum > bbNumMaxBeforeResolution) { // If this is an empty block, its in and out maps are both the same. // We identify this case by setting fromBBNum or toBBNum to 0, and using only the other. SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum); unsigned toBBNum = splitEdgeInfo.toBBNum; if (toBBNum == 0) { assert(splitEdgeInfo.fromBBNum != 0); return outVarToRegMaps[splitEdgeInfo.fromBBNum]; } else { return inVarToRegMaps[toBBNum]; } } return outVarToRegMaps[bbNum]; } //------------------------------------------------------------------------ // setVarReg: Set the register associated with a variable in the given 'bbVarToRegMap'. 
//
// Arguments:
//    bbVarToRegMap   - the map of interest
//    trackedVarIndex - the lvVarIndex for the variable
//    reg             - the register to which it is being mapped
//
// Return Value:
//    None
//
void LinearScan::setVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex, regNumber reg)
{
    assert(trackedVarIndex < compiler->lvaTrackedCount);
    regNumberSmall regSmall = (regNumberSmall)reg;
    assert((regNumber)regSmall == reg);
    bbVarToRegMap[trackedVarIndex] = regSmall;
}

//------------------------------------------------------------------------
// getVarReg: Get the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
//    bbVarToRegMap   - the map of interest
//    trackedVarIndex - the lvVarIndex for the variable
//
// Return Value:
//    The register to which 'trackedVarIndex' is mapped
//
regNumber LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex)
{
    assert(enregisterLocalVars);
    assert(trackedVarIndex < compiler->lvaTrackedCount);
    return (regNumber)bbVarToRegMap[trackedVarIndex];
}

// Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
// the block)
VarToRegMap LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
{
    assert(enregisterLocalVars);
    VarToRegMap inVarToRegMap = inVarToRegMaps[bbNum];
    memcpy(inVarToRegMap, srcVarToRegMap, (regMapCount * sizeof(regNumber)));
    return inVarToRegMap;
}

//------------------------------------------------------------------------
// checkLastUses: Check correctness of last use flags
//
// Arguments:
//    The block for which we are checking last uses.
//
// Notes:
//    This does a backward walk of the RefPositions, starting from the liveOut set.
//    This method was previously used to set the last uses, which were computed by
//    liveness, but were not created in some cases of multiple lclVar references in the
//    same tree. However, now that last uses are computed as RefPositions are created,
//    that is no longer necessary, and this method is simply retained as a check.
//    The exception to the check-only behavior is when LSRA_EXTEND_LIFETIMES is set via
//    COMPlus_JitStressRegs. In that case, this method is required, because even though
//    the RefPositions will not be marked lastUse in that case, we still need to correctly
//    mark the last uses on the tree nodes, which is done by this method.
//
#ifdef DEBUG
void LinearScan::checkLastUses(BasicBlock* block)
{
    if (VERBOSE)
    {
        JITDUMP("\n\nCHECKING LAST USES for " FMT_BB ", liveout=", block->bbNum);
        dumpConvertedVarSet(compiler, block->bbLiveOut);
        JITDUMP("\n==============================\n");
    }

    unsigned keepAliveVarNum = BAD_VAR_NUM;
    if (compiler->lvaKeepAliveAndReportThis())
    {
        keepAliveVarNum = compiler->info.compThisArg;
        assert(compiler->info.compIsStatic == false);
    }

    // find which uses are lastUses

    // Work backwards starting with live out.
    // 'computedLive' is updated to include any exposed use (including those in this
    // block that we've already seen). When we encounter a use, if it's
    // not in that set, then it's a last use.

    VARSET_TP computedLive(VarSetOps::MakeCopy(compiler, block->bbLiveOut));

    bool                       foundDiff       = false;
    RefPositionReverseIterator reverseIterator = refPositions.rbegin();
    RefPosition*               currentRefPosition;
    for (currentRefPosition = &reverseIterator; currentRefPosition->refType != RefTypeBB;
         reverseIterator++, currentRefPosition = &reverseIterator)
    {
        // We should never see ParamDefs or ZeroInits within a basic block.
assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit); if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isLocalVar) { unsigned varNum = currentRefPosition->getInterval()->varNum; unsigned varIndex = currentRefPosition->getInterval()->getVarIndex(compiler); LsraLocation loc = currentRefPosition->nodeLocation; // We should always have a tree node for a localVar, except for the "special" RefPositions. GenTree* tree = currentRefPosition->treeNode; assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); if (!VarSetOps::IsMember(compiler, computedLive, varIndex) && varNum != keepAliveVarNum) { // There was no exposed use, so this is a "last use" (and we mark it thus even if it's a def) if (extendLifetimes()) { // NOTE: this is a bit of a hack. When extending lifetimes, the "last use" bit will be clear. // This bit, however, would normally be used during resolveLocalRef to set the value of // LastUse on the node for a ref position. If this bit is not set correctly even when // extending lifetimes, the code generator will assert as it expects to have accurate last // use information. To avoid these asserts, set the LastUse bit here. // Note also that extendLifetimes() is an LSRA stress mode, so it will only be true for // Checked or Debug builds, for which this method will be executed. if (tree != nullptr) { tree->AsLclVar()->SetLastUse(currentRefPosition->multiRegIdx); } } else if (!currentRefPosition->lastUse) { JITDUMP("missing expected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc); foundDiff = true; } VarSetOps::AddElemD(compiler, computedLive, varIndex); } else if (currentRefPosition->lastUse) { JITDUMP("unexpected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc); foundDiff = true; } else if (extendLifetimes() && tree != nullptr) { // NOTE: see the comment above re: the extendLifetimes hack. tree->AsLclVar()->ClearLastUse(currentRefPosition->multiRegIdx); } if (currentRefPosition->refType == RefTypeDef || currentRefPosition->refType == RefTypeDummyDef) { VarSetOps::RemoveElemD(compiler, computedLive, varIndex); } } assert(reverseIterator != refPositions.rend()); } VARSET_TP liveInNotComputedLive(VarSetOps::Diff(compiler, block->bbLiveIn, computedLive)); // We may have exception vars in the liveIn set of exception blocks that are not computed live. if (compiler->ehBlockHasExnFlowDsc(block)) { VarSetOps::DiffD(compiler, liveInNotComputedLive, compiler->fgGetHandlerLiveVars(block)); } VarSetOps::Iter liveInNotComputedLiveIter(compiler, liveInNotComputedLive); unsigned liveInNotComputedLiveIndex = 0; while (liveInNotComputedLiveIter.NextElem(&liveInNotComputedLiveIndex)) { LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(liveInNotComputedLiveIndex); if (varDesc->lvLRACandidate) { JITDUMP(FMT_BB ": V%02u is in LiveIn set, but not computed live.\n", block->bbNum, compiler->lvaTrackedIndexToLclNum(liveInNotComputedLiveIndex)); foundDiff = true; } } VarSetOps::DiffD(compiler, computedLive, block->bbLiveIn); const VARSET_TP& computedLiveNotLiveIn(computedLive); // reuse the buffer. 
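    // At this point bbLiveIn has been subtracted from 'computedLive', so what remains are variables
    // that the backward walk computed as live into the block but that liveness did not include in
    // the LiveIn set; any such register candidate is a discrepancy.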
    VarSetOps::Iter computedLiveNotLiveInIter(compiler, computedLiveNotLiveIn);
    unsigned        computedLiveNotLiveInIndex = 0;
    while (computedLiveNotLiveInIter.NextElem(&computedLiveNotLiveInIndex))
    {
        LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(computedLiveNotLiveInIndex);
        if (varDesc->lvLRACandidate)
        {
            JITDUMP(FMT_BB ": V%02u is computed live, but not in LiveIn set.\n", block->bbNum,
                    compiler->lvaTrackedIndexToLclNum(computedLiveNotLiveInIndex));
            foundDiff = true;
        }
    }

    assert(!foundDiff);
}
#endif // DEBUG

//------------------------------------------------------------------------
// findPredBlockForLiveIn: Determine which block should be used for the register locations of the live-in variables.
//
// Arguments:
//    block                 - The block for which we're selecting a predecessor.
//    prevBlock             - The previous block in allocation order.
//    pPredBlockIsAllocated - A debug-only argument that indicates whether any of the predecessors have been seen
//                            in allocation order.
//
// Return Value:
//    The selected predecessor.
//
// Assumptions:
//    in DEBUG, caller initializes *pPredBlockIsAllocated to false, and it will be set to true if the block
//    returned is in fact a predecessor.
//
// Notes:
//    This will select a predecessor based on the heuristics obtained by getLsraBlockBoundaryLocations(), which can be
//    one of:
//      LSRA_BLOCK_BOUNDARY_PRED    - Use the register locations of a predecessor block (default)
//      LSRA_BLOCK_BOUNDARY_LAYOUT  - Use the register locations of the previous block in layout order.
//                                    This is the only case where this actually returns a different block.
//      LSRA_BLOCK_BOUNDARY_ROTATE  - Rotate the register locations from a predecessor.
//                                    For this case, the block returned is the same as for LSRA_BLOCK_BOUNDARY_PRED, but
//                                    the register locations will be "rotated" to stress the resolution and allocation
//                                    code.
BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
                                               BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
{
    BasicBlock* predBlock = nullptr;
    assert(*pPredBlockIsAllocated == false);

    // Blocks with exception flow on entry use no predecessor blocks, as all incoming vars
    // are on the stack.
    if (blockInfo[block->bbNum].hasEHBoundaryIn)
    {
        JITDUMP("\n\nIncoming EH boundary; ");
        return nullptr;
    }

    if (block == compiler->fgFirstBB)
    {
        return nullptr;
    }

    if (block->bbPreds == nullptr)
    {
        assert((block != compiler->fgFirstBB) || (prevBlock != nullptr));
        JITDUMP("\n\nNo predecessor; ");

        // Some throw blocks do not have a predecessor. For such blocks, we want to return the fact
        // that the predecessor is indeed null instead of returning the prevBlock. Returning prevBlock
        // would be wrong, because LSRA would think that the variable is live in registers based on
        // the lexical flow, but that won't be true according to the control flow.
        // Example:
        //
        // IG05:
        //      ...         ; V01 is in 'rdi'
        //      JNE IG07
        //      ...
        // IG06:
        //      ...
        //      ...         ; V01 is in 'rbx'
        //      JMP IG08
        // IG07:
        //      ...         ; LSRA thinks V01 is in 'rbx' if IG06 is set as previous block of IG07.
        //      ....
        //      CALL CORINFO_HELP_RNGCHKFAIL
        //      ...
        // IG08:
        //      ...
        //      ...
        if (block->bbJumpKind == BBJ_THROW)
        {
            JITDUMP(" - throw block; ");
            return nullptr;
        }

        // We may have unreachable blocks, due to optimization.
        // We don't want to set the predecessor as null in this case, since that will result in
        // unnecessary DummyDefs, and possibly result in inconsistencies requiring resolution
        // (since these unreachable blocks can have reachable successors).
return prevBlock; } #ifdef DEBUG if (getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_LAYOUT) { if (prevBlock != nullptr) { predBlock = prevBlock; } } else #endif // DEBUG { predBlock = block->GetUniquePred(compiler); if (predBlock != nullptr) { // We should already have returned null if this block has a single incoming EH boundary edge. assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { if (predBlock->bbJumpKind == BBJ_COND) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext; noway_assert(otherBlock != nullptr); if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { // This is the case when we have a conditional branch where one target has already // been visited. It would be best to use the same incoming regs as that block, // so that we have less likelihood of having to move registers. // For example, in determining the block to use for the starting register locations for // "block" in the following example, we'd like to use the same predecessor for "block" // as for "otherBlock", so that both successors of predBlock have the same locations, reducing // the likelihood of needing a split block on a backedge: // // otherPred // | // otherBlock <-+ // . . . | // | // predBlock----+ // | // block // if (blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { return nullptr; } else { for (BasicBlock* const otherPred : otherBlock->PredBlocks()) { if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum) { predBlock = otherPred; break; } } } } } } else { predBlock = nullptr; } } else { for (BasicBlock* const candidatePredBlock : block->PredBlocks()) { if (isBlockVisited(candidatePredBlock)) { if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight)) { predBlock = candidatePredBlock; INDEBUG(*pPredBlockIsAllocated = true;) } } } } if (predBlock == nullptr) { predBlock = prevBlock; assert(predBlock != nullptr); JITDUMP("\n\nNo allocated predecessor; "); } } return predBlock; } #ifdef DEBUG void LinearScan::dumpVarRefPositions(const char* title) { if (enregisterLocalVars) { printf("\nVAR REFPOSITIONS %s\n", title); for (unsigned i = 0; i < compiler->lvaCount; i++) { printf("--- V%02u", i); const LclVarDsc* varDsc = compiler->lvaGetDesc(i); if (varDsc->lvIsRegCandidate()) { Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); printf(" (Interval %d)\n", interval->intervalIndex); for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition) { ref->dump(this); } } else { printf("\n"); } } printf("\n"); } } #endif // DEBUG // Set the default rpFrameType based upon codeGen->isFramePointerRequired() // This was lifted from the register predictor // void LinearScan::setFrameType() { FrameType frameType = FT_NOT_SET; #if DOUBLE_ALIGN compiler->codeGen->setDoubleAlign(false); if (doDoubleAlign) { frameType = FT_DOUBLE_ALIGN_FRAME; compiler->codeGen->setDoubleAlign(true); } else #endif // DOUBLE_ALIGN if (compiler->codeGen->isFramePointerRequired()) { frameType = FT_EBP_FRAME; } else { if (compiler->rpMustCreateEBPCalled == false) { #ifdef DEBUG const char* reason; #endif // DEBUG compiler->rpMustCreateEBPCalled = true; if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) { JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); compiler->codeGen->setFrameRequired(true); } } if (compiler->codeGen->isFrameRequired()) { frameType = FT_EBP_FRAME; } else 
{ frameType = FT_ESP_FRAME; } } switch (frameType) { case FT_ESP_FRAME: noway_assert(!compiler->codeGen->isFramePointerRequired()); noway_assert(!compiler->codeGen->isFrameRequired()); compiler->codeGen->setFramePointerUsed(false); break; case FT_EBP_FRAME: compiler->codeGen->setFramePointerUsed(true); break; #if DOUBLE_ALIGN case FT_DOUBLE_ALIGN_FRAME: noway_assert(!compiler->codeGen->isFramePointerRequired()); compiler->codeGen->setFramePointerUsed(false); break; #endif // DOUBLE_ALIGN default: noway_assert(!"rpFrameType not set correctly!"); break; } // If we are using FPBASE as the frame register, we cannot also use it for // a local var. regMaskTP removeMask = RBM_NONE; if (frameType == FT_EBP_FRAME) { removeMask |= RBM_FPBASE; } compiler->rpFrameType = frameType; #ifdef TARGET_ARMARCH // Determine whether we need to reserve a register for large lclVar offsets. if (compiler->compRsvdRegCheck(Compiler::REGALLOC_FRAME_LAYOUT)) { // We reserve R10/IP1 in this case to hold the offsets in load/store instructions compiler->codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD; assert(REG_OPT_RSVD != REG_FP); JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD)); removeMask |= RBM_OPT_RSVD; } #endif // TARGET_ARMARCH if ((removeMask != RBM_NONE) && ((availableIntRegs & removeMask) != 0)) { // We know that we're already in "read mode" for availableIntRegs. However, // we need to remove these registers, so subsequent users (like callers // to allRegs()) get the right thing. The RemoveRegistersFromMasks() code // fixes up everything that already took a dependency on the value that was // previously read, so this completes the picture. availableIntRegs.OverrideAssign(availableIntRegs & ~removeMask); } } //------------------------------------------------------------------------ // copyOrMoveRegInUse: Is 'ref' a copyReg/moveReg that is still busy at the given location? // // Arguments: // ref: The RefPosition of interest // loc: The LsraLocation at which we're determining whether it's busy. // // Return Value: // true iff 'ref' is active at the given location // bool copyOrMoveRegInUse(RefPosition* ref, LsraLocation loc) { if (!ref->copyReg && !ref->moveReg) { return false; } if (ref->getRefEndLocation() >= loc) { return true; } Interval* interval = ref->getInterval(); RefPosition* nextRef = interval->getNextRefPosition(); if (nextRef != nullptr && nextRef->treeNode == ref->treeNode && nextRef->getRefEndLocation() >= loc) { return true; } return false; } //------------------------------------------------------------------------ // getRegisterType: Get the RegisterType to use for the given RefPosition // // Arguments: // currentInterval: The interval for the current allocation // refPosition: The RefPosition of the current Interval for which a register is being allocated // // Return Value: // The RegisterType that should be allocated for this RefPosition // // Notes: // This will nearly always be identical to the registerType of the interval, except in the case // of SIMD types of 8 bytes (currently only Vector2) when they are passed and returned in integer // registers, or copied to a return temp. // This method need only be called in situations where we may be dealing with the register requirements // of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when // we are interested in the "defining" type of the interval). 
This is because the situation of interest // only happens at the use (where it must be copied to an integer register). RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition) { assert(refPosition->getInterval() == currentInterval); RegisterType regType = currentInterval->registerType; regMaskTP candidates = refPosition->registerAssignment; assert((candidates & allRegs(regType)) != RBM_NONE); return regType; } //------------------------------------------------------------------------ // isMatchingConstant: Check to see whether a given register contains the constant referenced // by the given RefPosition // // Arguments: // physRegRecord: The RegRecord for the register we're interested in. // refPosition: The RefPosition for a constant interval. // // Return Value: // True iff the register was defined by an identical constant node as the current interval. // bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition) { if ((physRegRecord->assignedInterval == nullptr) || !physRegRecord->assignedInterval->isConstant || (refPosition->refType != RefTypeDef)) { return false; } Interval* interval = refPosition->getInterval(); if (!interval->isConstant || !isRegConstant(physRegRecord->regNum, interval->registerType)) { return false; } noway_assert(refPosition->treeNode != nullptr); GenTree* otherTreeNode = physRegRecord->assignedInterval->firstRefPosition->treeNode; noway_assert(otherTreeNode != nullptr); if (refPosition->treeNode->OperGet() != otherTreeNode->OperGet()) { return false; } switch (otherTreeNode->OperGet()) { case GT_CNS_INT: { ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue(); ssize_t v2 = otherTreeNode->AsIntCon()->IconValue(); if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0)) { #ifdef TARGET_64BIT // If the constant is negative, only reuse registers of the same type. // This is because, on a 64-bit system, we do not sign-extend immediates in registers to // 64-bits unless they are actually longs, as this requires a longer instruction. // This doesn't apply to a 32-bit system, on which long values occupy multiple registers. // (We could sign-extend, but we would have to always sign-extend, because if we reuse more // than once, we won't have access to the instruction that originally defines the constant). if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) || (v1 >= 0)) #endif // TARGET_64BIT { return true; } } break; } case GT_CNS_DBL: { // For floating point constants, the values must be identical, not simply compare // equal. So we compare the bits. if (refPosition->treeNode->AsDblCon()->isBitwiseEqual(otherTreeNode->AsDblCon()) && (refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet())) { return true; } break; } default: break; } return false; } //------------------------------------------------------------------------ // allocateReg: Find a register that satisfies the requirements for refPosition, // taking into account the preferences for the given Interval, // and possibly spilling a lower weight Interval. // // Arguments: // currentInterval: The interval for the current allocation // refPosition: The RefPosition of the current Interval for which a register is being allocated // Return Value: // The regNumber, if any, allocated to the RefPosition. // Returns REG_NA only if 'refPosition->RegOptional()' is true, and there are // no free registers and no registers containing lower-weight Intervals that can be spilled. 
//
// Notes:
//    This method will prefer to allocate a free register, but if none are available,
//    it will look for a lower-weight Interval to spill.
//    Weight and farthest distance of next reference are used to determine whether an Interval
//    currently occupying a register should be spilled. It will be spilled either:
//    - At its most recent RefPosition, if that is within the current block, OR
//    - At the boundary between the previous block and this one
//
//    To select a ref position for spilling:
//    - If refPosition->RegOptional() == false
//         The RefPosition chosen for spilling will be the lowest weight
//         of all, and if there is more than one ref position with the
//         same lowest weight, among them it chooses the one with the farthest
//         distance to its next reference.
//
//    - If refPosition->RegOptional() == true
//         The ref position chosen for spilling will not only be the lowest weight
//         of all but will also have a weight lower than 'refPosition'. If there is
//         no such ref position, no register will be allocated.
//
regNumber LinearScan::allocateReg(Interval*    currentInterval,
                                  RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
    regMaskTP foundRegBit = regSelector->select(currentInterval, refPosition DEBUG_ARG(registerScore));
    if (foundRegBit == RBM_NONE)
    {
        return REG_NA;
    }

    regNumber  foundReg               = genRegNumFromMask(foundRegBit);
    RegRecord* availablePhysRegRecord = getRegisterRecord(foundReg);
    Interval*  assignedInterval       = availablePhysRegRecord->assignedInterval;
    if ((assignedInterval != currentInterval) &&
        isAssigned(availablePhysRegRecord ARM_ARG(getRegisterType(currentInterval, refPosition))))
    {
        if (regSelector->isSpilling())
        {
            // We're spilling.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_ARM
            if (currentInterval->registerType == TYP_DOUBLE)
            {
                assert(genIsValidDoubleReg(availablePhysRegRecord->regNum));
                unassignDoublePhysReg(availablePhysRegRecord);
            }
            else if (assignedInterval->registerType == TYP_DOUBLE)
            {
                // Make sure we spill both halves of the double register.
                assert(genIsValidDoubleReg(assignedInterval->assignedReg->regNum));
                unassignPhysReg(assignedInterval->assignedReg, assignedInterval->recentRefPosition);
            }
            else
#endif
            {
                unassignPhysReg(availablePhysRegRecord, assignedInterval->recentRefPosition);
            }
        }
        else
        {
            // If we considered this "unassigned" because this interval's lifetime ends before
            // the next ref, remember it.
            // For historical reasons (due to former short-circuiting of this case), if we're reassigning
            // the current interval to a previous assignment, we don't remember the previous interval.
            // Note that we need to compute this condition before calling unassignPhysReg, which will reset
            // assignedInterval->physReg.
bool wasAssigned = regSelector->foundUnassignedReg() && (assignedInterval != nullptr) && (assignedInterval->physReg == foundReg); unassignPhysReg(availablePhysRegRecord ARM_ARG(currentInterval->registerType)); if (regSelector->isMatchingConstant() && compiler->opts.OptimizationEnabled()) { assert(assignedInterval->isConstant); refPosition->treeNode->SetReuseRegVal(); } else if (wasAssigned) { updatePreviousInterval(availablePhysRegRecord, assignedInterval, assignedInterval->registerType); } else { assert(!regSelector->isConstAvailable()); } } } assignPhysReg(availablePhysRegRecord, currentInterval); refPosition->registerAssignment = foundRegBit; return foundReg; } //------------------------------------------------------------------------ // canSpillReg: Determine whether we can spill physRegRecord // // Arguments: // physRegRecord - reg to spill // refLocation - Location of RefPosition where this register will be spilled // // Return Value: // True - if we can spill physRegRecord // False - otherwise // bool LinearScan::canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation) { assert(physRegRecord->assignedInterval != nullptr); RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition; if (recentAssignedRef != nullptr) { // We can't spill a register that's active at the current location. // We should already have determined this with isRegBusy before calling this method. assert(!isRefPositionActive(recentAssignedRef, refLocation)); return true; } // recentAssignedRef can only be null if this is a parameter that has not yet been // moved to a register (or stack), in which case we can't spill it yet. assert(physRegRecord->assignedInterval->getLocalVar(compiler)->lvIsParam); return false; } //------------------------------------------------------------------------ // getSpillWeight: Get the weight associated with spilling the given register // // Arguments: // physRegRecord - reg to spill // // Return Value: // The weight associated with the location at which we will spill. // // Note: This helper is designed to be used only from allocateReg() and getDoubleSpillWeight() // weight_t LinearScan::getSpillWeight(RegRecord* physRegRecord) { assert(physRegRecord->assignedInterval != nullptr); RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition; weight_t weight = BB_ZERO_WEIGHT; // We shouldn't call this method if there is no recentAssignedRef. assert(recentAssignedRef != nullptr); // We shouldn't call this method if the register is active at this location. 
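    // Illustrative note (hypothetical weights, not allocator state): getWeight() below is
    // derived from a lclVar's weighted reference count, or from the containing block's
    // weight for other nodes, so a value referenced in a hot loop block (weight 16, say)
    // is a far costlier spill candidate than one referenced only in a cold block (weight 1).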
    assert(!isRefPositionActive(recentAssignedRef, currentLoc));
    weight = getWeight(recentAssignedRef);

    return weight;
}

#ifdef TARGET_ARM
//------------------------------------------------------------------------
// canSpillDoubleReg: Determine whether we can spill physRegRecord
//
// Arguments:
//    physRegRecord - reg to spill (must be a valid double register)
//    refLocation   - Location of RefPosition where this register will be spilled
//
// Return Value:
//    True  - if we can spill physRegRecord
//    False - otherwise
//
bool LinearScan::canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation)
{
    assert(genIsValidDoubleReg(physRegRecord->regNum));
    RegRecord* physRegRecord2 = getSecondHalfRegRec(physRegRecord);

    if ((physRegRecord->assignedInterval != nullptr) && !canSpillReg(physRegRecord, refLocation))
    {
        return false;
    }
    if ((physRegRecord2->assignedInterval != nullptr) && !canSpillReg(physRegRecord2, refLocation))
    {
        return false;
    }
    return true;
}

//------------------------------------------------------------------------
// unassignDoublePhysReg: unassign a double register (pair)
//
// Arguments:
//    doubleRegRecord - reg to unassign
//
// Note:
//    The given RegRecord must be a valid (even numbered) double register.
//
void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord)
{
    assert(genIsValidDoubleReg(doubleRegRecord->regNum));

    RegRecord* doubleRegRecordLo = doubleRegRecord;
    RegRecord* doubleRegRecordHi = getSecondHalfRegRec(doubleRegRecordLo);
    // For a double register, we have the following four cases.
    // Case 1: doubleRegRecLo is assigned to a TYP_DOUBLE interval
    // Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
    // Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
    // Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval
    if (doubleRegRecordLo->assignedInterval != nullptr)
    {
        if (doubleRegRecordLo->assignedInterval->registerType == TYP_DOUBLE)
        {
            // Case 1: doubleRegRecLo is assigned to a TYP_DOUBLE interval
            unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);
        }
        else
        {
            // Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
            // Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
            assert(doubleRegRecordLo->assignedInterval->registerType == TYP_FLOAT);
            unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);

            if (doubleRegRecordHi != nullptr)
            {
                if (doubleRegRecordHi->assignedInterval != nullptr)
                {
                    assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
                    unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
                }
            }
        }
    }
    else
    {
        // Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval
        assert(doubleRegRecordHi->assignedInterval != nullptr);
        assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
        unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
    }
}

#endif // TARGET_ARM

//------------------------------------------------------------------------
// isRefPositionActive: Determine whether a given RefPosition is active at the given location
//
// Arguments:
//    refPosition - the RefPosition of interest
//    refLocation - the LsraLocation at which we want to know if it is active
//
// Return Value:
//    True  - if this RefPosition occurs at the given location, OR
//            if it occurs at the previous location and is marked delayRegFree.
//    False - otherwise
//
bool LinearScan::isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation)
{
    return (refPosition->nodeLocation == refLocation ||
            ((refPosition->nodeLocation + 1 == refLocation) && refPosition->delayRegFree));
}

//------------------------------------------------------------------------
// isSpillCandidate: Determine if a register is a spill candidate for a given RefPosition.
//
// Arguments:
//    current       The interval for the current allocation
//    refPosition   The RefPosition of the current Interval for which a register is being allocated
//    physRegRecord The RegRecord for the register we're considering for spill
//
// Return Value:
//    True iff the given register can be spilled to accommodate the given RefPosition.
//
bool LinearScan::isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord)
{
    regMaskTP    candidateBit = genRegMask(physRegRecord->regNum);
    LsraLocation refLocation  = refPosition->nodeLocation;
    // We shouldn't be calling this if we haven't already determined that the register is not
    // busy until the next kill.
    assert(!isRegBusy(physRegRecord->regNum, current->registerType));
    // We should already have determined that the register isn't actively in use.
    assert(!isRegInUse(physRegRecord->regNum, current->registerType));
    // We shouldn't be calling this if 'refPosition' is a fixed reference to this register.
    assert(!refPosition->isFixedRefOfRegMask(candidateBit));
    // We shouldn't be calling this if there is a fixed reference at the same location
    // (and it's not due to this reference), as checked above.
    assert(!conflictingFixedRegReference(physRegRecord->regNum, refPosition));

    bool canSpill;
#ifdef TARGET_ARM
    if (current->registerType == TYP_DOUBLE)
    {
        canSpill = canSpillDoubleReg(physRegRecord, refLocation);
    }
    else
#endif // TARGET_ARM
    {
        canSpill = canSpillReg(physRegRecord, refLocation);
    }

    if (!canSpill)
    {
        return false;
    }
    return true;
}

// Grab a register to use to copy and then immediately use.
// This is called only for localVar intervals that already have a register
// assignment that is not compatible with the current RefPosition.
// This is not like regular assignment, because we don't want to change
// any preferences or existing register assignments.
// Prefer a free register that's got the earliest next use.
// Otherwise, spill something with the farthest next use
//
regNumber LinearScan::assignCopyReg(RefPosition* refPosition)
{
    Interval* currentInterval = refPosition->getInterval();
    assert(currentInterval != nullptr);
    assert(currentInterval->isActive);

    // Save the relatedInterval, if any, so that it doesn't get modified during allocation.
    Interval* savedRelatedInterval   = currentInterval->relatedInterval;
    currentInterval->relatedInterval = nullptr;

    // We don't really want to change the default assignment,
    // so 1) pretend this isn't active, and 2) remember the old reg
    regNumber  oldPhysReg   = currentInterval->physReg;
    RegRecord* oldRegRecord = currentInterval->assignedReg;
    assert(oldRegRecord->regNum == oldPhysReg);
    currentInterval->isActive = false;

    // We *must* allocate a register, and it will be a copyReg. Set that field now, so that
    // refPosition->RegOptional() will return false.
    refPosition->copyReg = true;

    RegisterScore registerScore = NONE;
    regNumber     allocatedReg  = allocateReg(currentInterval, refPosition DEBUG_ARG(&registerScore));
    assert(allocatedReg != REG_NA);

    INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, currentInterval, allocatedReg, nullptr, registerScore));

    // Now restore the old info
    currentInterval->relatedInterval = savedRelatedInterval;
    currentInterval->physReg         = oldPhysReg;
    currentInterval->assignedReg     = oldRegRecord;
    currentInterval->isActive        = true;

    return allocatedReg;
}

//------------------------------------------------------------------------
// isAssigned: Check if the given RegRecord has an assignedInterval.
//
// Arguments:
//    regRec     - The RegRecord to check that it is assigned.
//    newRegType - The RegisterType we are assigning; on ARM a TYP_DOUBLE assignment
//                 requires checking the other half of the double register as well.
//
// Return Value:
//    Returns true if the given RegRecord has an assignedInterval.
//
bool LinearScan::isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
    if (regRec->assignedInterval != nullptr)
    {
        return true;
    }
#ifdef TARGET_ARM
    if (newRegType == TYP_DOUBLE)
    {
        RegRecord* otherRegRecord = getSecondHalfRegRec(regRec);

        if (otherRegRecord->assignedInterval != nullptr)
        {
            return true;
        }
    }
#endif
    return false;
}

//------------------------------------------------------------------------
// checkAndAssignInterval: Check if the interval is already assigned and
//                         if it is then unassign the physical record
//                         and set the assignedInterval to 'interval'
//
// Arguments:
//    regRec   - The RegRecord of interest
//    interval - The Interval that we're going to assign to 'regRec'
//
void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval)
{
    Interval* assignedInterval = regRec->assignedInterval;
    if (assignedInterval != nullptr && assignedInterval != interval)
    {
        // This is allocated to another interval. Either it is inactive, or it was allocated as a
        // copyReg and is therefore not the "assignedReg" of the other interval. In the latter case,
        // we simply unassign it - in the former case we need to set the physReg on the interval to
        // REG_NA to indicate that it is no longer in that register.
        // The lack of checking for this case resulted in an assert in the retail version of System.dll,
        // in method SerialStream.GetDcbFlag.
        // Note that we can't check for the copyReg case, because we may have seen a more recent
        // RefPosition for the Interval that was NOT a copyReg.
        if (assignedInterval->assignedReg == regRec)
        {
            assert(assignedInterval->isActive == false);
            assignedInterval->physReg = REG_NA;
        }
        unassignPhysReg(regRec->regNum);
    }
#ifdef TARGET_ARM
    // If 'interval' and 'assignedInterval' were both TYP_DOUBLE, then we have unassigned 'assignedInterval'
    // from both halves. Otherwise, if 'interval' is TYP_DOUBLE, we now need to unassign the other half.
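    // Illustrative sketch (ARM32 register-file convention; example registers, not live
    // allocator state): each double register overlays an even/odd pair of float registers,
    //     d0 = { s0, s1 }, d1 = { s2, s3 }, ...
    // so a TYP_DOUBLE interval in d0 occupies both the s0 and s1 RegRecords, and both
    // halves must be unassigned together.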
if ((interval->registerType == TYP_DOUBLE) && ((assignedInterval == nullptr) || (assignedInterval->registerType == TYP_FLOAT))) { RegRecord* otherRegRecord = getSecondHalfRegRec(regRec); assignedInterval = otherRegRecord->assignedInterval; if (assignedInterval != nullptr && assignedInterval != interval) { if (assignedInterval->assignedReg == otherRegRecord) { assert(assignedInterval->isActive == false); assignedInterval->physReg = REG_NA; } unassignPhysReg(otherRegRecord->regNum); } } #endif updateAssignedInterval(regRec, interval, interval->registerType); } // Assign the given physical register interval to the given interval void LinearScan::assignPhysReg(RegRecord* regRec, Interval* interval) { regMaskTP assignedRegMask = genRegMask(regRec->regNum); compiler->codeGen->regSet.rsSetRegsModified(assignedRegMask DEBUGARG(true)); interval->assignedReg = regRec; checkAndAssignInterval(regRec, interval); interval->physReg = regRec->regNum; interval->isActive = true; if (interval->isLocalVar) { // Prefer this register for future references interval->updateRegisterPreferences(assignedRegMask); } } //------------------------------------------------------------------------ // setIntervalAsSplit: Set this Interval as being split // // Arguments: // interval - The Interval which is being split // // Return Value: // None. // // Notes: // The given Interval will be marked as split, and it will be added to the // set of splitOrSpilledVars. // // Assumptions: // "interval" must be a lclVar interval, as tree temps are never split. // This is asserted in the call to getVarIndex(). // void LinearScan::setIntervalAsSplit(Interval* interval) { if (interval->isLocalVar) { unsigned varIndex = interval->getVarIndex(compiler); if (!interval->isSplit) { VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex); } else { assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex)); } } interval->isSplit = true; } //------------------------------------------------------------------------ // setIntervalAsSpilled: Set this Interval as being spilled // // Arguments: // interval - The Interval which is being spilled // // Return Value: // None. // // Notes: // The given Interval will be marked as spilled, and it will be added // to the set of splitOrSpilledVars. // void LinearScan::setIntervalAsSpilled(Interval* interval) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (interval->isUpperVector) { assert(interval->relatedInterval->isLocalVar); interval->isSpilled = true; // Now we need to mark the local as spilled also, even if the lower half is never spilled, // as this will use the upper part of its home location. interval = interval->relatedInterval; // We'll now mark this as spilled, so it changes the spillCost. 
        RefPosition* recentRefPos = interval->recentRefPosition;
        if (!interval->isSpilled && interval->isActive && (recentRefPos != nullptr))
        {
            VarSetOps::AddElemD(compiler, splitOrSpilledVars, interval->getVarIndex(compiler));
            interval->isSpilled = true;
            regNumber reg       = interval->physReg;
            spillCost[reg]      = getSpillWeight(getRegisterRecord(reg));
        }
    }
#endif
    if (interval->isLocalVar)
    {
        unsigned varIndex = interval->getVarIndex(compiler);
        if (!interval->isSpilled)
        {
            VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex);
        }
        else
        {
            assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex));
        }
    }
    interval->isSpilled = true;
}

//------------------------------------------------------------------------
// spillInterval: Spill the "interval" starting from "fromRefPosition" (up to "toRefPosition")
//
// Arguments:
//    interval        - The interval that contains the RefPosition to be spilled
//    fromRefPosition - The RefPosition at which the Interval is to be spilled
//    toRefPosition   - The RefPosition at which it must be reloaded (debug only arg)
//
// Return Value:
//    None.
//
// Assumptions:
//    fromRefPosition and toRefPosition must not be null
//
void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* toRefPosition))
{
    assert(fromRefPosition != nullptr && toRefPosition != nullptr);
    assert(fromRefPosition->getInterval() == interval && toRefPosition->getInterval() == interval);
    assert(fromRefPosition->nextRefPosition == toRefPosition);
    if (!fromRefPosition->lastUse)
    {
        // If a lclVar def/use ref position is not allocated a register, it should be marked as
        // spillAfter even if it is reg-optional. Note that if it is a WriteThru interval, the value
        // is always written to the stack, but the WriteThru indicates that the register is no longer live.
        if (fromRefPosition->RegOptional() && !(interval->isLocalVar && fromRefPosition->IsActualRef()))
        {
            fromRefPosition->registerAssignment = RBM_NONE;
        }
        else
        {
            fromRefPosition->spillAfter = true;
        }
    }

    // Only handle the singledef intervals whose firstRefPosition is RefTypeDef and is not yet marked as spillAfter.
    // For singledef intervals whose firstRefPosition is already marked as spillAfter, there is no need to mark
    // them as singleDefSpill because they will always get spilled at the firstRefPosition.
    // This helps in spilling the singleDef at definition
    //
    // Note: Only mark "singleDefSpill" for those intervals who ever get spilled. The intervals that are never spilled
    // will not be marked as "singleDefSpill" and hence won't get spilled at the first definition.
    if (interval->isSingleDef && RefTypeIsDef(interval->firstRefPosition->refType) &&
        !interval->firstRefPosition->spillAfter)
    {
        // TODO-CQ: Check if it is beneficial to spill at def, meaning, if it is a hot block don't worry about
        // doing the spill. Another option is to track the number of refpositions and, if an interval has more
        // than X refpositions, then perform this optimization.
        interval->firstRefPosition->singleDefSpill = true;
    }

    assert(toRefPosition != nullptr);

#ifdef DEBUG
    if (VERBOSE)
    {
        dumpLsraAllocationEvent(LSRA_EVENT_SPILL, interval);
    }
#endif // DEBUG

    INTRACK_STATS(updateLsraStat(STAT_SPILL, fromRefPosition->bbNum));

    interval->isActive = false;
    setIntervalAsSpilled(interval);

    // If fromRefPosition occurs before the beginning of this block, mark this as living in the stack
    // on entry to this block.
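    // (Hypothetical timeline: if curBBStartLocation == 40 and fromRefPosition is at
    // location 38, the spill takes effect before this block begins, so on entry the
    // variable's home is the stack and the inVarToRegMap must record REG_STK.)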
    if (fromRefPosition->nodeLocation <= curBBStartLocation)
    {
        // This must be a lclVar interval
        assert(interval->isLocalVar);
        setInVarRegForBB(curBBNum, interval->varNum, REG_STK);
    }
}

//------------------------------------------------------------------------
// unassignPhysRegNoSpill: Unassign the given physical register record from
//                         an active interval, without spilling.
//
// Arguments:
//    regRec - the RegRecord to be unassigned
//
// Return Value:
//    None.
//
// Assumptions:
//    The assignedInterval must not be null, and must be active.
//
// Notes:
//    This method is used to unassign a register when an interval needs to be moved to a
//    different register, but not (yet) spilled.
//
void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
{
    Interval* assignedInterval = regRec->assignedInterval;
    assert(assignedInterval != nullptr && assignedInterval->isActive);
    assignedInterval->isActive = false;
    unassignPhysReg(regRec, nullptr);
    assignedInterval->isActive = true;
}

//------------------------------------------------------------------------
// checkAndClearInterval: Clear the assignedInterval for the given
//                        physical register record
//
// Arguments:
//    regRec           - the physical RegRecord to be unassigned
//    spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
//                       or nullptr if we aren't spilling
//
// Return Value:
//    None.
//
// Assumptions:
//    see unassignPhysReg
//
void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition)
{
    Interval* assignedInterval = regRec->assignedInterval;
    assert(assignedInterval != nullptr);
    regNumber thisRegNum = regRec->regNum;

    if (spillRefPosition == nullptr)
    {
        // Note that we can't assert for the copyReg case
        //
        if (assignedInterval->physReg == thisRegNum)
        {
            assert(assignedInterval->isActive == false);
        }
    }
    else
    {
        assert(spillRefPosition->getInterval() == assignedInterval);
    }

    updateAssignedInterval(regRec, nullptr, assignedInterval->registerType);
}

//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
//                  assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
//    regRec     - The RegRecord to be unassigned
//    newRegType - The RegisterType of the interval that would be assigned
//
// Return Value:
//    None.
//
// Notes:
//    On ARM, intervals must be unassigned taking into account the register type
//    of the interval that would be assigned.
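//    For example (hypothetical registers): unassigning a TYP_DOUBLE interval via the
//    RegRecord for s1 is normalized to the even half so that both s0 and s1 (i.e. d0)
//    are cleared; conversely, when the incoming interval is TYP_DOUBLE, a TYP_FLOAT
//    occupant of the odd half is unassigned as well.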
//
void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
    RegRecord* regRecToUnassign = regRec;
#ifdef TARGET_ARM
    RegRecord* anotherRegRec = nullptr;

    if ((regRecToUnassign->assignedInterval != nullptr) &&
        (regRecToUnassign->assignedInterval->registerType == TYP_DOUBLE))
    {
        // If the register type of the interval (the one being unassigned or the new one) is
        // TYP_DOUBLE, it must be a valid double register (i.e. an even-numbered register).
        if (!genIsValidDoubleReg(regRecToUnassign->regNum))
        {
            regRecToUnassign = findAnotherHalfRegRec(regRec);
        }
    }
    else
    {
        if (newRegType == TYP_DOUBLE)
        {
            anotherRegRec = getSecondHalfRegRec(regRecToUnassign);
        }
    }
#endif

    if (regRecToUnassign->assignedInterval != nullptr)
    {
        unassignPhysReg(regRecToUnassign, regRecToUnassign->assignedInterval->recentRefPosition);
    }
#ifdef TARGET_ARM
    if ((anotherRegRec != nullptr) && (anotherRegRec->assignedInterval != nullptr))
    {
        unassignPhysReg(anotherRegRec, anotherRegRec->assignedInterval->recentRefPosition);
    }
#endif
}

//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
//                  assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
//    regRec           - the RegRecord to be unassigned
//    spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
//
// Return Value:
//    None.
//
// Assumptions:
//    The assignedInterval must not be null.
//    If spillRefPosition is null, the assignedInterval must be inactive, or not currently
//    assigned to this register (e.g. this is a copyReg for that Interval).
//    Otherwise, spillRefPosition must be associated with the assignedInterval.
//
void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition)
{
    Interval* assignedInterval = regRec->assignedInterval;
    assert(assignedInterval != nullptr);
    assert(spillRefPosition == nullptr || spillRefPosition->getInterval() == assignedInterval);
    regNumber thisRegNum = regRec->regNum;

    // Is assignedInterval actually still assigned to this register?
    bool      intervalIsAssigned = (assignedInterval->physReg == thisRegNum);
    regNumber regToUnassign      = thisRegNum;

#ifdef TARGET_ARM
    RegRecord* anotherRegRec = nullptr;

    // Prepare second half RegRecord of a double register for TYP_DOUBLE
    if (assignedInterval->registerType == TYP_DOUBLE)
    {
        assert(isFloatRegType(regRec->registerType));
        RegRecord* doubleRegRec;
        if (genIsValidDoubleReg(thisRegNum))
        {
            anotherRegRec = getSecondHalfRegRec(regRec);
            doubleRegRec  = regRec;
        }
        else
        {
            regToUnassign = REG_PREV(thisRegNum);
            anotherRegRec = getRegisterRecord(regToUnassign);
            doubleRegRec  = anotherRegRec;
        }

        // Both RegRecords should have been assigned to the same interval.
        assert(assignedInterval == anotherRegRec->assignedInterval);
        if (!intervalIsAssigned && (assignedInterval->physReg == anotherRegRec->regNum))
        {
            intervalIsAssigned = true;
        }

        clearNextIntervalRef(regToUnassign, TYP_DOUBLE);
        clearSpillCost(regToUnassign, TYP_DOUBLE);
        checkAndClearInterval(doubleRegRec, spillRefPosition);

        // Both RegRecords should have been unassigned together.
assert(regRec->assignedInterval == nullptr); assert(anotherRegRec->assignedInterval == nullptr); } else #endif // TARGET_ARM { clearNextIntervalRef(thisRegNum, assignedInterval->registerType); clearSpillCost(thisRegNum, assignedInterval->registerType); checkAndClearInterval(regRec, spillRefPosition); } makeRegAvailable(regToUnassign, assignedInterval->registerType); RefPosition* nextRefPosition = nullptr; if (spillRefPosition != nullptr) { nextRefPosition = spillRefPosition->nextRefPosition; } if (!intervalIsAssigned && assignedInterval->physReg != REG_NA) { // This must have been a temporary copy reg, but we can't assert that because there // may have been intervening RefPositions that were not copyRegs. // reg->assignedInterval has already been set to nullptr by checkAndClearInterval() assert(regRec->assignedInterval == nullptr); return; } // regNumber victimAssignedReg = assignedInterval->physReg; assignedInterval->physReg = REG_NA; bool spill = assignedInterval->isActive && nextRefPosition != nullptr; if (spill) { // If this is an active interval, it must have a recentRefPosition, // otherwise it would not be active assert(spillRefPosition != nullptr); #if 0 // TODO-CQ: Enable this and insert an explicit GT_COPY (otherwise there's no way to communicate // to codegen that we want the copyReg to be the new home location). // If the last reference was a copyReg, and we're spilling the register // it was copied from, then make the copyReg the new primary location // if possible if (spillRefPosition->copyReg) { regNumber copyFromRegNum = victimAssignedReg; regNumber copyRegNum = genRegNumFromMask(spillRefPosition->registerAssignment); if (copyFromRegNum == thisRegNum && getRegisterRecord(copyRegNum)->assignedInterval == assignedInterval) { assert(copyRegNum != thisRegNum); assignedInterval->physReg = copyRegNum; assignedInterval->assignedReg = this->getRegisterRecord(copyRegNum); return; } } #endif // 0 #ifdef DEBUG // With JitStressRegs == 0x80 (LSRA_EXTEND_LIFETIMES), we may have a RefPosition // that is not marked lastUse even though the treeNode is a lastUse. In that case // we must not mark it for spill because the register will have been immediately freed // after use. While we could conceivably add special handling for this case in codegen, // it would be messy and undesirably cause the "bleeding" of LSRA stress modes outside // of LSRA. if (extendLifetimes() && assignedInterval->isLocalVar && RefTypeIsUse(spillRefPosition->refType) && spillRefPosition->treeNode != nullptr && spillRefPosition->treeNode->AsLclVar()->IsLastUse(spillRefPosition->multiRegIdx)) { dumpLsraAllocationEvent(LSRA_EVENT_SPILL_EXTENDED_LIFETIME, assignedInterval); assignedInterval->isActive = false; spill = false; // If the spillRefPosition occurs before the beginning of this block, it will have // been marked as living in this register on entry to this block, but we now need // to mark this as living on the stack. if (spillRefPosition->nodeLocation <= curBBStartLocation) { setInVarRegForBB(curBBNum, assignedInterval->varNum, REG_STK); if (spillRefPosition->nextRefPosition != nullptr) { setIntervalAsSpilled(assignedInterval); } } else { // Otherwise, we need to mark spillRefPosition as lastUse, or the interval // will remain active beyond its allocated range during the resolution phase. spillRefPosition->lastUse = true; } } else #endif // DEBUG { spillInterval(assignedInterval, spillRefPosition DEBUGARG(nextRefPosition)); } } // Maintain the association with the interval, if it has more references. 
// Or, if we "remembered" an interval assigned to this register, restore it. if (nextRefPosition != nullptr) { assignedInterval->assignedReg = regRec; } else if (canRestorePreviousInterval(regRec, assignedInterval)) { regRec->assignedInterval = regRec->previousInterval; regRec->previousInterval = nullptr; if (regRec->assignedInterval->physReg != thisRegNum) { clearNextIntervalRef(thisRegNum, regRec->assignedInterval->registerType); } else { updateNextIntervalRef(thisRegNum, regRec->assignedInterval); } #ifdef TARGET_ARM // Note: // We can not use updateAssignedInterval() and updatePreviousInterval() here, // because regRec may not be a even-numbered float register. // Update second half RegRecord of a double register for TYP_DOUBLE if (regRec->assignedInterval->registerType == TYP_DOUBLE) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec); anotherHalfRegRec->assignedInterval = regRec->assignedInterval; anotherHalfRegRec->previousInterval = nullptr; } #endif // TARGET_ARM #ifdef DEBUG if (spill) { dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval, thisRegNum); } else { dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, regRec->assignedInterval, thisRegNum); } #endif // DEBUG } else { updateAssignedInterval(regRec, nullptr, assignedInterval->registerType); updatePreviousInterval(regRec, nullptr, assignedInterval->registerType); } } //------------------------------------------------------------------------ // spillGCRefs: Spill any GC-type intervals that are currently in registers. // // Arguments: // killRefPosition - The RefPosition for the kill // // Return Value: // None. // // Notes: // This is used to ensure that we have no live GC refs in registers at an // unmanaged call. // void LinearScan::spillGCRefs(RefPosition* killRefPosition) { // For each physical register that can hold a GC type, // if it is occupied by an interval of a GC type, spill that interval. regMaskTP candidateRegs = killRefPosition->registerAssignment; INDEBUG(bool killedRegs = false); while (candidateRegs != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(candidateRegs); candidateRegs &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); Interval* assignedInterval = regRecord->assignedInterval; if (assignedInterval == nullptr || (assignedInterval->isActive == false)) { continue; } bool needsKill = varTypeIsGC(assignedInterval->registerType); if (!needsKill) { // The importer will assign a GC type to the rhs of an assignment if the lhs type is a GC type, // even if the rhs is not. See the CEE_STLOC* case in impImportBlockCode(). As a result, // we can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type. // The emitter will mark this register as holding a GC type. Therfore we must spill this value. // This was exposed on Arm32 with EH write-thru. if ((assignedInterval->recentRefPosition != nullptr) && (assignedInterval->recentRefPosition->treeNode != nullptr)) { needsKill = varTypeIsGC(assignedInterval->recentRefPosition->treeNode); } } if (needsKill) { INDEBUG(killedRegs = true); unassignPhysReg(regRecord, assignedInterval->recentRefPosition); makeRegAvailable(nextReg, assignedInterval->registerType); } } INDEBUG(dumpLsraAllocationEvent(killedRegs ? 
    INDEBUG(dumpLsraAllocationEvent(killedRegs ? LSRA_EVENT_DONE_KILL_GC_REFS : LSRA_EVENT_NO_GC_KILLS, nullptr,
                                    REG_NA, nullptr));
}

//------------------------------------------------------------------------
// processBlockEndAllocation: Update var locations after 'currentBlock' has been allocated
//
// Arguments:
//    currentBlock - the BasicBlock we have just finished allocating registers for
//
// Return Value:
//    None
//
// Notes:
//    Calls processBlockEndLocations() to set the outVarToRegMap, then gets the next block,
//    and sets the inVarToRegMap appropriately.
//
void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock)
{
    assert(currentBlock != nullptr);
    if (enregisterLocalVars)
    {
        processBlockEndLocations(currentBlock);
    }
    markBlockVisited(currentBlock);

    // Get the next block to allocate.
    // When the last block in the method has successors, there will be a final "RefTypeBB" to
    // ensure that we get the varToRegMap set appropriately, but in that case we don't need
    // to worry about "nextBlock".
    BasicBlock* nextBlock = getNextBlock();
    if (nextBlock != nullptr)
    {
        processBlockStartLocations(nextBlock);
    }
}

//------------------------------------------------------------------------
// rotateBlockStartLocation: When in the LSRA_BLOCK_BOUNDARY_ROTATE stress mode, attempt to
//                           "rotate" the register assignment for a localVar to the next higher
//                           register that is available.
//
// Arguments:
//    interval      - the Interval for the variable whose register is getting rotated
//    targetReg     - its register assignment from the predecessor block being used for live-in
//    availableRegs - registers available for use
//
// Return Value:
//    The new register to use.
//
#ifdef DEBUG
regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs)
{
    if (targetReg != REG_STK && getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE)
    {
        // If we're rotating the register locations at block boundaries, try to use
        // the next higher register number of the appropriate register type.
        regMaskTP candidateRegs = allRegs(interval->registerType) & availableRegs;
        regNumber firstReg      = REG_NA;
        regNumber newReg        = REG_NA;
        while (candidateRegs != RBM_NONE)
        {
            regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
            candidateRegs &= ~nextRegBit;
            regNumber nextReg = genRegNumFromMask(nextRegBit);
            if (nextReg > targetReg)
            {
                newReg = nextReg;
                break;
            }
            else if (firstReg == REG_NA)
            {
                firstReg = nextReg;
            }
        }

        if (newReg == REG_NA)
        {
            assert(firstReg != REG_NA);
            newReg = firstReg;
        }
        targetReg = newReg;
    }

    return targetReg;
}
#endif // DEBUG

#ifdef TARGET_ARM
//--------------------------------------------------------------------------------------
// isSecondHalfReg: Test if regRec is the second half of a double register
//                  which is assigned to an interval.
//
// Arguments:
//    regRec   - a register to be tested
//    interval - an interval which is assigned to some register
//
// Assumptions:
//    None
//
// Return Value:
//    True only if regRec is the second half of assignedReg in interval
//
bool LinearScan::isSecondHalfReg(RegRecord* regRec, Interval* interval)
{
    RegRecord* assignedReg = interval->assignedReg;

    if (assignedReg != nullptr && interval->registerType == TYP_DOUBLE)
    {
        // interval should have been allocated to a valid double register
        assert(genIsValidDoubleReg(assignedReg->regNum));

        // Find a second half RegRecord of double register
        regNumber firstRegNum  = assignedReg->regNum;
        regNumber secondRegNum = REG_NEXT(firstRegNum);

        assert(genIsValidFloatReg(secondRegNum) && !genIsValidDoubleReg(secondRegNum));

        RegRecord* secondRegRec = getRegisterRecord(secondRegNum);

        return secondRegRec == regRec;
    }

    return false;
}

//------------------------------------------------------------------------------------------
// getSecondHalfRegRec: Get the second (odd) half of an ARM32 double register
//
// Arguments:
//    regRec - A float RegRecord
//
// Assumptions:
//    regRec must be a valid double register (i.e. even)
//
// Return Value:
//    The RegRecord for the second half of the double register
//
RegRecord* LinearScan::getSecondHalfRegRec(RegRecord* regRec)
{
    regNumber  secondHalfRegNum;
    RegRecord* secondHalfRegRec;

    assert(genIsValidDoubleReg(regRec->regNum));

    secondHalfRegNum = REG_NEXT(regRec->regNum);
    secondHalfRegRec = getRegisterRecord(secondHalfRegNum);

    return secondHalfRegRec;
}

//------------------------------------------------------------------------------------------
// findAnotherHalfRegRec: Find the other half RegRecord that forms the same ARM32 double register
//
// Arguments:
//    regRec - A float RegRecord
//
// Assumptions:
//    None
//
// Return Value:
//    The RegRecord that forms the same double register as regRec
//
RegRecord* LinearScan::findAnotherHalfRegRec(RegRecord* regRec)
{
    regNumber anotherHalfRegNum = findAnotherHalfRegNum(regRec->regNum);
    return getRegisterRecord(anotherHalfRegNum);
}

//------------------------------------------------------------------------------------------
// findAnotherHalfRegNum: Find the other half register's number that forms the same ARM32 double register
//
// Arguments:
//    regNumber - A float regNumber
//
// Assumptions:
//    None
//
// Return Value:
//    The register number that forms the same double register as regNum.
//
regNumber LinearScan::findAnotherHalfRegNum(regNumber regNum)
{
    regNumber anotherHalfRegNum;

    assert(genIsValidFloatReg(regNum));

    // Find another half register for TYP_DOUBLE interval,
    // following the same logic as in canRestorePreviousInterval().
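    // (For example, with the pairing noted earlier: the other half of an even register
    // such as s0 is s1 (REG_NEXT), and the other half of an odd register such as s1 is
    // s0 (REG_PREV); the register names are illustrative.)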
if (genIsValidDoubleReg(regNum)) { anotherHalfRegNum = REG_NEXT(regNum); assert(!genIsValidDoubleReg(anotherHalfRegNum)); } else { anotherHalfRegNum = REG_PREV(regNum); assert(genIsValidDoubleReg(anotherHalfRegNum)); } return anotherHalfRegNum; } #endif //-------------------------------------------------------------------------------------- // canRestorePreviousInterval: Test if we can restore previous interval // // Arguments: // regRec - a register which contains previous interval to be restored // assignedInterval - an interval just unassigned // // Assumptions: // None // // Return Value: // True only if previous interval of regRec can be restored // bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval) { bool retVal = (regRec->previousInterval != nullptr && regRec->previousInterval != assignedInterval && regRec->previousInterval->assignedReg == regRec && regRec->previousInterval->getNextRefPosition() != nullptr); #ifdef TARGET_ARM if (retVal && regRec->previousInterval->registerType == TYP_DOUBLE) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec); retVal = retVal && anotherHalfRegRec->assignedInterval == nullptr; } #endif return retVal; } bool LinearScan::isAssignedToInterval(Interval* interval, RegRecord* regRec) { bool isAssigned = (interval->assignedReg == regRec); #ifdef TARGET_ARM isAssigned |= isSecondHalfReg(regRec, interval); #endif return isAssigned; } void LinearScan::unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap) { // Is there another interval currently assigned to this register? If so unassign it. Interval* assignedInterval = regRecord->assignedInterval; if (assignedInterval != nullptr) { if (isAssignedToInterval(assignedInterval, regRecord)) { // Only localVars, constants or vector upper halves should be assigned to registers at block boundaries. if (!assignedInterval->isLocalVar) { assert(assignedInterval->isConstant || assignedInterval->IsUpperVector()); // Don't need to update the VarToRegMap. inVarToRegMap = nullptr; } regNumber assignedRegNum = assignedInterval->assignedReg->regNum; // If the interval is active, it will be set to active when we reach its new // register assignment (which we must not yet have done, or it wouldn't still be // assigned to this register). assignedInterval->isActive = false; unassignPhysReg(assignedInterval->assignedReg, nullptr); if ((inVarToRegMap != nullptr) && inVarToRegMap[assignedInterval->getVarIndex(compiler)] == assignedRegNum) { inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK; } } else { // This interval is no longer assigned to this register. updateAssignedInterval(regRecord, nullptr, assignedInterval->registerType); } } } //------------------------------------------------------------------------ // processBlockStartLocations: Update var locations on entry to 'currentBlock' and clear constant // registers. // // Arguments: // currentBlock - the BasicBlock we are about to allocate registers for // // Return Value: // None // // Notes: // During the allocation pass (allocationPassComplete = false), we use the outVarToRegMap // of the selected predecessor to determine the lclVar locations for the inVarToRegMap. // During the resolution (write-back when allocationPassComplete = true) pass, we only // modify the inVarToRegMap in cases where a lclVar was spilled after the block had been // completed. 
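//    As a small hypothetical example: if the selected predecessor's outVarToRegMap says
//    V01 is in some register reg1 and V02 is at REG_STK, the allocation pass seeds this
//    block's inVarToRegMap with those same locations; the write-back pass only changes an
//    entry to REG_STK if the variable was spilled after this block had originally been
//    completed. (V01/V02/reg1 are illustrative names, not actual state.)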
void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) { // If we have no register candidates we should only call this method during allocation. assert(enregisterLocalVars || !allocationPassComplete); if (!enregisterLocalVars) { // Just clear any constant registers and return. resetAvailableRegs(); for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; clearNextIntervalRef(reg, physRegRecord->registerType); clearSpillCost(reg, physRegRecord->registerType); if (assignedInterval != nullptr) { assert(assignedInterval->isConstant); physRegRecord->assignedInterval = nullptr; } } return; } unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum; VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum); VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum); // If this block enters an exception region, all incoming vars are on the stack. if (predBBNum == 0) { #if DEBUG if (blockInfo[currentBlock->bbNum].hasEHBoundaryIn || !allocationPassComplete) { // This should still be in its initialized empty state. for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { // In the case where we're extending lifetimes for stress, we are intentionally modeling variables // as live when they really aren't to create extra register pressure & constraints. // However, this means that non-EH-vars will be live into EH regions. We can and should ignore the // locations of these. Note that they aren't reported to codegen anyway. if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex)) { assert(inVarToRegMap[varIndex] == REG_STK); } } } #endif // DEBUG predVarToRegMap = inVarToRegMap; } VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveIn)); #ifdef DEBUG if (getLsraExtendLifeTimes()) { VarSetOps::AssignNoCopy(compiler, currentLiveVars, registerCandidateVars); } // If we are rotating register assignments at block boundaries, we want to make the // inactive registers available for the rotation. regMaskTP inactiveRegs = RBM_NONE; #endif // DEBUG regMaskTP liveRegs = RBM_NONE; VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate) { continue; } regNumber targetReg; Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* nextRefPosition = interval->getNextRefPosition(); assert((nextRefPosition != nullptr) || (interval->isWriteThru)); bool leaveOnStack = false; // Special handling for variables live in/out of exception handlers. if (interval->isWriteThru) { // There are 3 cases where we will leave writethru lclVars on the stack: // 1) There is no predecessor. // 2) It is conservatively or artificially live - that is, it has no next use, // so there is no place for codegen to record that the register is no longer occupied. // 3) This block has a predecessor with an outgoing EH edge. We won't be able to add "join" // resolution to load the EH var into a register along that edge, so it must be on stack. 
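            // (In the check below, case 2 covers both a null nextRefPosition and a next
            // reference that is a def, since a def as the next reference implies there is
            // no next use to keep the register occupied.)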
if ((predBBNum == 0) || (nextRefPosition == nullptr) || (RefTypeIsDef(nextRefPosition->refType)) || blockInfo[currentBlock->bbNum].hasEHPred) { leaveOnStack = true; } } if (!allocationPassComplete) { targetReg = getVarReg(predVarToRegMap, varIndex); if (leaveOnStack) { targetReg = REG_STK; } #ifdef DEBUG regNumber newTargetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs)); if (newTargetReg != targetReg) { targetReg = newTargetReg; setIntervalAsSplit(interval); } #endif // DEBUG setVarReg(inVarToRegMap, varIndex, targetReg); } else // allocationPassComplete (i.e. resolution/write-back pass) { targetReg = getVarReg(inVarToRegMap, varIndex); // There are four cases that we need to consider during the resolution pass: // 1. This variable had a register allocated initially, and it was not spilled in the RefPosition // that feeds this block. In this case, both targetReg and predVarToRegMap[varIndex] will be targetReg. // 2. This variable had not been spilled prior to the end of predBB, but was later spilled, so // predVarToRegMap[varIndex] will be REG_STK, but targetReg is its former allocated value. // In this case, we will normally change it to REG_STK. We will update its "spilled" status when we // encounter it in resolveLocalRef(). // 2a. If the next RefPosition is marked as a copyReg, we need to retain the allocated register. This is // because the copyReg RefPosition will not have recorded the "home" register, yet downstream // RefPositions rely on the correct "home" register. // 3. This variable was spilled before we reached the end of predBB. In this case, both targetReg and // predVarToRegMap[varIndex] will be REG_STK, and the next RefPosition will have been marked // as reload during allocation time if necessary (note that by the time we actually reach the next // RefPosition, we may be using a different predecessor, at which it is still in a register). // 4. This variable was spilled during the allocation of this block, so targetReg is REG_STK // (because we set inVarToRegMap at the time we spilled it), but predVarToRegMap[varIndex] // is not REG_STK. We retain the REG_STK value in the inVarToRegMap. if (targetReg != REG_STK) { if (getVarReg(predVarToRegMap, varIndex) != REG_STK) { // Case #1 above. assert(getVarReg(predVarToRegMap, varIndex) == targetReg || getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE); } else if (!nextRefPosition->copyReg) { // case #2 above. setVarReg(inVarToRegMap, varIndex, REG_STK); targetReg = REG_STK; } // Else case 2a. - retain targetReg. } // Else case #3 or #4, we retain targetReg and nothing further to do or assert. } if (interval->physReg == targetReg) { if (interval->isActive) { assert(targetReg != REG_STK); assert(interval->assignedReg != nullptr && interval->assignedReg->regNum == targetReg && interval->assignedReg->assignedInterval == interval); liveRegs |= getRegMask(targetReg, interval->registerType); continue; } } else if (interval->physReg != REG_NA) { // This can happen if we are using the locations from a basic block other than the // immediately preceding one - where the variable was in a different location. if ((targetReg != REG_STK) || leaveOnStack) { // Unassign it from the register (it may get a new register below). 
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval) { interval->isActive = false; unassignPhysReg(getRegisterRecord(interval->physReg), nullptr); } else { // This interval was live in this register the last time we saw a reference to it, // but has since been displaced. interval->physReg = REG_NA; } } else if (!allocationPassComplete) { // Keep the register assignment - if another var has it, it will get unassigned. // Otherwise, resolution will fix it up later, and it will be more // likely to match other assignments this way. targetReg = interval->physReg; interval->isActive = true; liveRegs |= getRegMask(targetReg, interval->registerType); INDEBUG(inactiveRegs |= genRegMask(targetReg)); setVarReg(inVarToRegMap, varIndex, targetReg); } else { interval->physReg = REG_NA; } } if (targetReg != REG_STK) { RegRecord* targetRegRecord = getRegisterRecord(targetReg); liveRegs |= getRegMask(targetReg, interval->registerType); if (!allocationPassComplete) { updateNextIntervalRef(targetReg, interval); updateSpillCost(targetReg, interval); } if (!interval->isActive) { interval->isActive = true; interval->physReg = targetReg; interval->assignedReg = targetRegRecord; } if (targetRegRecord->assignedInterval != interval) { #ifdef TARGET_ARM // If this is a TYP_DOUBLE interval, and the assigned interval is either null or is TYP_FLOAT, // we also need to unassign the other half of the register. // Note that if the assigned interval is TYP_DOUBLE, it will be unassigned below. if ((interval->registerType == TYP_DOUBLE) && ((targetRegRecord->assignedInterval == nullptr) || (targetRegRecord->assignedInterval->registerType == TYP_FLOAT))) { assert(genIsValidDoubleReg(targetReg)); unassignIntervalBlockStart(getSecondHalfRegRec(targetRegRecord), allocationPassComplete ? nullptr : inVarToRegMap); } // If this is a TYP_FLOAT interval, and the assigned interval was TYP_DOUBLE, we also // need to update the liveRegs to specify that the other half is not live anymore. // As mentioned above, for TYP_DOUBLE, the other half will be unassigned further below. if ((interval->registerType == TYP_FLOAT) && ((targetRegRecord->assignedInterval != nullptr) && (targetRegRecord->assignedInterval->registerType == TYP_DOUBLE))) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(targetRegRecord); // Use TYP_FLOAT to get the regmask of just the half reg. liveRegs &= ~getRegMask(anotherHalfRegRec->regNum, TYP_FLOAT); } #endif // TARGET_ARM unassignIntervalBlockStart(targetRegRecord, allocationPassComplete ? nullptr : inVarToRegMap); assignPhysReg(targetRegRecord, interval); } if (interval->recentRefPosition != nullptr && !interval->recentRefPosition->copyReg && interval->recentRefPosition->registerAssignment != genRegMask(targetReg)) { interval->getNextRefPosition()->outOfOrder = true; } } } // Unassign any registers that are no longer live, and set register state, if allocating. 
if (!allocationPassComplete) { resetRegState(); setRegsInUse(liveRegs); } for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); if ((liveRegs & genRegMask(reg)) == 0) { makeRegAvailable(reg, physRegRecord->registerType); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { assert(assignedInterval->isLocalVar || assignedInterval->isConstant || assignedInterval->IsUpperVector()); if (!assignedInterval->isConstant && assignedInterval->assignedReg == physRegRecord) { assignedInterval->isActive = false; if (assignedInterval->getNextRefPosition() == nullptr) { unassignPhysReg(physRegRecord, nullptr); } if (!assignedInterval->IsUpperVector()) { inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK; } } else { // This interval may still be active, but was in another register in an // intervening block. updateAssignedInterval(physRegRecord, nullptr, assignedInterval->registerType); } #ifdef TARGET_ARM // unassignPhysReg, above, may have restored a 'previousInterval', in which case we need to // get the value of 'physRegRecord->assignedInterval' rather than using 'assignedInterval'. if (physRegRecord->assignedInterval != nullptr) { assignedInterval = physRegRecord->assignedInterval; } if (assignedInterval->registerType == TYP_DOUBLE) { // Skip next float register, because we already addressed a double register assert(genIsValidDoubleReg(reg)); reg = REG_NEXT(reg); makeRegAvailable(reg, physRegRecord->registerType); } #endif // TARGET_ARM } } #ifdef TARGET_ARM else { Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr && assignedInterval->registerType == TYP_DOUBLE) { // Skip next float register, because we already addressed a double register assert(genIsValidDoubleReg(reg)); reg = REG_NEXT(reg); } } #endif // TARGET_ARM } } //------------------------------------------------------------------------ // processBlockEndLocations: Record the variables occupying registers after completing the current block. // // Arguments: // currentBlock - the block we have just completed. // // Return Value: // None // // Notes: // This must be called both during the allocation and resolution (write-back) phases. // This is because we need to have the outVarToRegMap locations in order to set the locations // at successor blocks during allocation time, but if lclVars are spilled after a block has been // completed, we need to record the REG_STK location for those variables at resolution time. void LinearScan::processBlockEndLocations(BasicBlock* currentBlock) { assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum); VarToRegMap outVarToRegMap = getOutVarToRegMap(curBBNum); VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveOut)); #ifdef DEBUG if (getLsraExtendLifeTimes()) { VarSetOps::Assign(compiler, currentLiveVars, registerCandidateVars); } #endif // DEBUG VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { Interval* interval = getIntervalForLocalVar(varIndex); if (interval->isActive) { assert(interval->physReg != REG_NA && interval->physReg != REG_STK); setVarReg(outVarToRegMap, varIndex, interval->physReg); } else { outVarToRegMap[varIndex] = REG_STK; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Ensure that we have no partially-spilled large vector locals. 
assert(!Compiler::varTypeNeedsPartialCalleeSave(interval->registerType) || !interval->isPartiallySpilled); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_END_BB)); } #ifdef DEBUG void LinearScan::dumpRefPositions(const char* str) { printf("------------\n"); printf("REFPOSITIONS %s: \n", str); printf("------------\n"); for (RefPosition& refPos : refPositions) { refPos.dump(this); } } #endif // DEBUG //------------------------------------------------------------------------ // LinearScan::makeRegisterInactive: Make the interval currently assigned to // a register inactive. // // Arguments: // physRegRecord - the RegRecord for the register // // Return Value: // None. // // Notes: // It may be that the RegRecord has already been freed, e.g. due to a kill, // or it may be that the register was a copyReg, so is not the assigned register // of the Interval currently occupying the register, in which case this method has no effect. // void LinearScan::makeRegisterInactive(RegRecord* physRegRecord) { Interval* assignedInterval = physRegRecord->assignedInterval; // It may have already been freed by a "Kill" if ((assignedInterval != nullptr) && (assignedInterval->physReg == physRegRecord->regNum)) { assignedInterval->isActive = false; if (assignedInterval->isConstant) { clearNextIntervalRef(physRegRecord->regNum, assignedInterval->registerType); } } } //------------------------------------------------------------------------ // LinearScan::freeRegister: Make a register available for use // // Arguments: // physRegRecord - the RegRecord for the register to be freed. // // Return Value: // None. // // Assumptions: // None. // It may be that the RegRecord has already been freed, e.g. due to a kill, // in which case this method has no effect. // // Notes: // If there is currently an Interval assigned to this register, and it has // more references (i.e. this is a local last-use, but more uses and/or // defs remain), it will remain assigned to the physRegRecord. However, since // it is marked inactive, the register will be available, albeit less desirable // to allocate. // void LinearScan::freeRegister(RegRecord* physRegRecord) { Interval* assignedInterval = physRegRecord->assignedInterval; makeRegAvailable(physRegRecord->regNum, physRegRecord->registerType); clearSpillCost(physRegRecord->regNum, physRegRecord->registerType); makeRegisterInactive(physRegRecord); if (assignedInterval != nullptr) { // TODO: Under the following conditions we should be just putting it in regsToMakeInactive // not regsToFree. // // We don't unassign in the following conditions: // - If this is a constant node, that we may encounter again, OR // - If its recent RefPosition is not a last-use and its next RefPosition is non-null. // - If there are no more RefPositions, or the next // one is a def. Note that the latter condition doesn't actually ensure that // there aren't subsequent uses that could be reached by a value in the assigned // register, but is merely a heuristic to avoid tying up the register (or using // it when it's non-optimal). A better alternative would be to use SSA, so that // we wouldn't unnecessarily link separate live ranges to the same register. 
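        // (Illustration of the heuristic above, with hypothetical RefPositions: an interval
        // whose next RefPosition is a use stays assigned-but-inactive, so a later allocation
        // can cheaply reuse the value already in the register; one whose next RefPosition is
        // a def, or that has no further RefPositions, is fully unassigned below.)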
// RefPosition* nextRefPosition = assignedInterval->getNextRefPosition(); if (!assignedInterval->isConstant && (nextRefPosition == nullptr || RefTypeIsDef(nextRefPosition->refType))) { #ifdef TARGET_ARM assert((assignedInterval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(physRegRecord->regNum)); #endif // TARGET_ARM unassignPhysReg(physRegRecord, nullptr); } } } //------------------------------------------------------------------------ // LinearScan::freeRegisters: Free the registers in 'regsToFree' // // Arguments: // regsToFree - the mask of registers to free // void LinearScan::freeRegisters(regMaskTP regsToFree) { if (regsToFree == RBM_NONE) { return; } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS)); makeRegsAvailable(regsToFree); while (regsToFree != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(regsToFree); regsToFree &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); #ifdef TARGET_ARM if (regRecord->assignedInterval != nullptr && (regRecord->assignedInterval->registerType == TYP_DOUBLE)) { assert(genIsValidDoubleReg(nextReg)); regsToFree &= ~(nextRegBit << 1); } #endif freeRegister(regRecord); } } //------------------------------------------------------------------------ // LinearScan::allocateRegisters: Perform the actual register allocation by iterating over // all of the previously constructed Intervals // void LinearScan::allocateRegisters() { JITDUMP("*************** In LinearScan::allocateRegisters()\n"); DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters")); // at start, nothing is active except for register args for (Interval& interval : intervals) { Interval* currentInterval = &interval; currentInterval->recentRefPosition = nullptr; currentInterval->isActive = false; if (currentInterval->isLocalVar) { LclVarDsc* varDsc = currentInterval->getLocalVar(compiler); if (varDsc->lvIsRegArg && currentInterval->firstRefPosition != nullptr) { currentInterval->isActive = true; } } } if (enregisterLocalVars) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars); unsigned largeVectorVarIndex = 0; while (largeVectorVarsIter.NextElem(&largeVectorVarIndex)) { Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex); lclVarInterval->isPartiallySpilled = false; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } resetRegState(); for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->recentRefPosition = nullptr; updateNextFixedRef(physRegRecord, physRegRecord->firstRefPosition); // Is this an incoming arg register? (Note that we don't, currently, consider reassigning // an incoming arg register as having spill cost.) 
Interval* interval = physRegRecord->assignedInterval; if (interval != nullptr) { #ifdef TARGET_ARM if ((interval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(reg)) #endif // TARGET_ARM { updateNextIntervalRef(reg, interval); updateSpillCost(reg, interval); setRegInUse(reg, interval->registerType); INDEBUG(registersToDump |= getRegMask(reg, interval->registerType)); } } else { clearNextIntervalRef(reg, physRegRecord->registerType); clearSpillCost(reg, physRegRecord->registerType); } } #ifdef DEBUG if (VERBOSE) { dumpRefPositions("BEFORE ALLOCATION"); dumpVarRefPositions("BEFORE ALLOCATION"); printf("\n\nAllocating Registers\n" "--------------------\n"); // Start with a small set of commonly used registers, so that we don't keep having to print a new title. // Include all the arg regs, as they may already have values assigned to them. registersToDump = LsraLimitSmallIntSet | LsraLimitSmallFPSet | RBM_ARG_REGS; dumpRegRecordHeader(); // Now print an empty "RefPosition", since we complete the dump of the regs at the beginning of the loop. printf(indentFormat, ""); } #endif // DEBUG BasicBlock* currentBlock = nullptr; LsraLocation prevLocation = MinLocation; regMaskTP regsToFree = RBM_NONE; regMaskTP delayRegsToFree = RBM_NONE; regMaskTP regsToMakeInactive = RBM_NONE; regMaskTP delayRegsToMakeInactive = RBM_NONE; regMaskTP copyRegsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; // This is the most recent RefPosition for which a register was allocated // - currently only used for DEBUG but maintained in non-debug, for clarity of code // (and will be optimized away because in non-debug spillAlways() unconditionally returns false) RefPosition* lastAllocatedRefPosition = nullptr; bool handledBlockEnd = false; for (RefPosition& refPositionIterator : refPositions) { RefPosition* currentRefPosition = &refPositionIterator; RefPosition* nextRefPosition = currentRefPosition->nextRefPosition; // TODO: Can we combine this with the freeing of registers below? It might // mess with the dump, since this was previously being done before the call below // to dumpRegRecords. regMaskTP tempRegsToMakeInactive = (regsToMakeInactive | delayRegsToMakeInactive); while (tempRegsToMakeInactive != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(tempRegsToMakeInactive); tempRegsToMakeInactive &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); clearSpillCost(regRecord->regNum, regRecord->registerType); makeRegisterInactive(regRecord); } if (currentRefPosition->nodeLocation > prevLocation) { makeRegsAvailable(regsToMakeInactive); // TODO: Clean this up. We need to make the delayRegs inactive as well, but don't want // to mark them as free yet. regsToMakeInactive |= delayRegsToMakeInactive; regsToMakeInactive = delayRegsToMakeInactive; delayRegsToMakeInactive = RBM_NONE; } #ifdef DEBUG // Set the activeRefPosition to null until we're done with any boundary handling. activeRefPosition = nullptr; if (VERBOSE) { // We're really dumping the RegRecords "after" the previous RefPosition, but it's more convenient // to do this here, since there are a number of "continue"s in this loop. 
dumpRegRecords(); } #endif // DEBUG // This is the previousRefPosition of the current Referent, if any RefPosition* previousRefPosition = nullptr; Interval* currentInterval = nullptr; Referenceable* currentReferent = nullptr; RefType refType = currentRefPosition->refType; currentReferent = currentRefPosition->referent; if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->IsPhysRegRef() && !lastAllocatedRefPosition->getInterval()->isInternal && (RefTypeIsDef(lastAllocatedRefPosition->refType) || lastAllocatedRefPosition->getInterval()->isLocalVar)) { assert(lastAllocatedRefPosition->registerAssignment != RBM_NONE); RegRecord* regRecord = lastAllocatedRefPosition->getInterval()->assignedReg; unassignPhysReg(regRecord, lastAllocatedRefPosition); // Now set lastAllocatedRefPosition to null, so that we don't try to spill it again lastAllocatedRefPosition = nullptr; } // We wait to free any registers until we've completed all the // uses for the current node. // This avoids reusing registers too soon. // We free before the last true def (after all the uses & internal // registers), and then again at the beginning of the next node. // This is made easier by assigning two LsraLocations per node - one // for all the uses, internal registers & all but the last def, and // another for the final def (if any). LsraLocation currentLocation = currentRefPosition->nodeLocation; // Free at a new location. if (currentLocation > prevLocation) { // CopyRegs are simply made available - we don't want to make the associated interval inactive. makeRegsAvailable(copyRegsToFree); copyRegsToFree = RBM_NONE; regsInUseThisLocation = regsInUseNextLocation; regsInUseNextLocation = RBM_NONE; if ((regsToFree | delayRegsToFree) != RBM_NONE) { freeRegisters(regsToFree); if ((currentLocation > (prevLocation + 1)) && (delayRegsToFree != RBM_NONE)) { // We should never see a delayReg that is delayed until a Location that has no RefPosition // (that would be the RefPosition that it was supposed to interfere with). assert(!"Found a delayRegFree associated with Location with no reference"); // However, to be cautious for the Release build case, we will free them. freeRegisters(delayRegsToFree); delayRegsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; } regsToFree = delayRegsToFree; delayRegsToFree = RBM_NONE; #ifdef DEBUG // Validate the current state just after we've freed the registers. This ensures that any pending // freed registers will have had their state updated to reflect the intervals they were holding. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { regMaskTP regMask = genRegMask(reg); // If this isn't available or if it's still waiting to be freed (i.e. it was in // delayRegsToFree and so now it's in regsToFree), then skip it. if ((regMask & (availableIntRegs | availableFloatRegs) & ~regsToFree) == RBM_NONE) { continue; } RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { bool isAssignedReg = (assignedInterval->physReg == reg); RefPosition* recentRefPosition = assignedInterval->recentRefPosition; // If we have a copyReg or a moveReg, we might have assigned this register to an Interval, // but that isn't considered its assignedReg. if (recentRefPosition != nullptr) { if (recentRefPosition->refType == RefTypeExpUse) { // We don't update anything on these, as they're just placeholders to extend the // lifetime. 
continue; } // For copyReg or moveReg, we don't have anything further to assert. if (recentRefPosition->copyReg || recentRefPosition->moveReg) { continue; } assert(assignedInterval->isConstant == isRegConstant(reg, assignedInterval->registerType)); if (assignedInterval->isActive) { // If this is not the register most recently allocated, it must be from a copyReg, // it was placed there by the inVarToRegMap or it might be one of the upper vector // save/restore refPosition. // In either case it must be a lclVar. if (!isAssignedToInterval(assignedInterval, physRegRecord)) { // We'd like to assert that this was either set by the inVarToRegMap, or by // a copyReg, but we can't traverse backward to check for a copyReg, because // we only have recentRefPosition, and there may be a previous RefPosition // at the same Location with a copyReg. bool sanityCheck = assignedInterval->isLocalVar; // For upper vector interval, make sure it was one of the save/restore only. if (assignedInterval->IsUpperVector()) { sanityCheck |= (recentRefPosition->refType == RefTypeUpperVectorSave) || (recentRefPosition->refType == RefTypeUpperVectorRestore); } assert(sanityCheck); } if (isAssignedReg) { assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation()); assert(!isRegAvailable(reg, assignedInterval->registerType)); assert((recentRefPosition == nullptr) || (spillCost[reg] == getSpillWeight(physRegRecord))); } else { assert((nextIntervalRef[reg] == MaxLocation) || isRegBusy(reg, assignedInterval->registerType)); } } else { if ((assignedInterval->physReg == reg) && !assignedInterval->isConstant) { assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation()); } else { assert(nextIntervalRef[reg] == MaxLocation); assert(isRegAvailable(reg, assignedInterval->registerType)); assert(spillCost[reg] == 0); } } } } else { assert(isRegAvailable(reg, physRegRecord->registerType)); assert(!isRegConstant(reg, physRegRecord->registerType)); assert(nextIntervalRef[reg] == MaxLocation); assert(spillCost[reg] == 0); } LsraLocation thisNextFixedRef = physRegRecord->getNextRefLocation(); assert(nextFixedRef[reg] == thisNextFixedRef); #ifdef TARGET_ARM // If this is occupied by a double interval, skip the corresponding float reg. if ((assignedInterval != nullptr) && (assignedInterval->registerType == TYP_DOUBLE)) { reg = REG_NEXT(reg); } #endif } #endif // DEBUG } } prevLocation = currentLocation; // get previous refposition, then current refpos is the new previous if (currentReferent != nullptr) { previousRefPosition = currentReferent->recentRefPosition; currentReferent->recentRefPosition = currentRefPosition; } else { assert((refType == RefTypeBB) || (refType == RefTypeKillGCRefs)); } #ifdef DEBUG activeRefPosition = currentRefPosition; // For the purposes of register resolution, we handle the DummyDefs before // the block boundary - so the RefTypeBB is after all the DummyDefs. // However, for the purposes of allocation, we want to handle the block // boundary first, so that we can free any registers occupied by lclVars // that aren't live in the next block and make them available for the // DummyDefs. // If we've already handled the BlockEnd, but now we're seeing the RefTypeBB, // dump it now. 
if ((refType == RefTypeBB) && handledBlockEnd) { dumpNewBlock(currentBlock, currentRefPosition->nodeLocation); } #endif // DEBUG if (!handledBlockEnd && (refType == RefTypeBB || refType == RefTypeDummyDef)) { // Free any delayed regs (now in regsToFree) before processing the block boundary freeRegisters(regsToFree); regsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; handledBlockEnd = true; curBBStartLocation = currentRefPosition->nodeLocation; if (currentBlock == nullptr) { currentBlock = startBlockSequence(); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, compiler->fgFirstBB)); } else { processBlockEndAllocation(currentBlock); currentBlock = moveToNextBlock(); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, currentBlock)); } } if (refType == RefTypeBB) { handledBlockEnd = false; continue; } if (refType == RefTypeKillGCRefs) { spillGCRefs(currentRefPosition); continue; } if (currentRefPosition->isPhysRegRef) { RegRecord* regRecord = currentRefPosition->getReg(); Interval* assignedInterval = regRecord->assignedInterval; updateNextFixedRef(regRecord, currentRefPosition->nextRefPosition); // If this is a FixedReg, disassociate any inactive constant interval from this register. // Otherwise, do nothing. if (refType == RefTypeFixedReg) { if (assignedInterval != nullptr && !assignedInterval->isActive && assignedInterval->isConstant) { clearConstantReg(regRecord->regNum, assignedInterval->registerType); regRecord->assignedInterval = nullptr; spillCost[regRecord->regNum] = 0; #ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE if (assignedInterval->registerType == TYP_DOUBLE) { RegRecord* otherRegRecord = findAnotherHalfRegRec(regRecord); assert(otherRegRecord->assignedInterval == assignedInterval); otherRegRecord->assignedInterval = nullptr; spillCost[otherRegRecord->regNum] = 0; } #endif // TARGET_ARM } regsInUseThisLocation |= currentRefPosition->registerAssignment; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition->assignedReg())); continue; } if (refType == RefTypeKill) { if (assignedInterval != nullptr) { unassignPhysReg(regRecord, assignedInterval->recentRefPosition); clearConstantReg(regRecord->regNum, assignedInterval->registerType); makeRegAvailable(regRecord->regNum, assignedInterval->registerType); } clearRegBusyUntilKill(regRecord->regNum); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum)); continue; } } // If this is an exposed use, do nothing - this is merely a placeholder to attempt to // ensure that a register is allocated for the full lifetime. The resolution logic // will take care of moving to the appropriate register if needed. 
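// An ExpUse only refreshes the next-reference bookkeeping for the interval's current register,
// if it has one; no allocation decision is made here.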
if (refType == RefTypeExpUse)
{
    INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_EXP_USE));
    currentInterval = currentRefPosition->getInterval();
    if (currentInterval->physReg != REG_NA)
    {
        updateNextIntervalRef(currentInterval->physReg, currentInterval);
    }
    continue;
}

regNumber assignedRegister = REG_NA;

assert(currentRefPosition->isIntervalRef());
currentInterval = currentRefPosition->getInterval();
assert(currentInterval != nullptr);
assignedRegister = currentInterval->physReg;

// Identify the special cases where we decide up-front not to allocate
bool allocate = true;
bool didDump  = false;

if (refType == RefTypeParamDef || refType == RefTypeZeroInit)
{
    if (nextRefPosition == nullptr)
    {
        // If it has no actual references, mark it as "lastUse"; since they're not actually part
        // of any flow they won't have been marked during dataflow. Otherwise, if we allocate a
        // register we won't unassign it.
        INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ZERO_REF, currentInterval));
        currentRefPosition->lastUse = true;
    }
    LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
    assert(varDsc != nullptr);
    assert(!blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn || currentInterval->isWriteThru);
    if (blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn ||
        blockInfo[compiler->fgFirstBB->bbNum].hasEHPred)
    {
        allocate = false;
    }
    else if (refType == RefTypeParamDef && (varDsc->lvRefCntWtd() <= BB_UNITY_WEIGHT) &&
             (!currentRefPosition->lastUse || (currentInterval->physReg == REG_STK)))
    {
        // If this is a low ref-count parameter, and either it is used (def is not the last use) or it's
        // passed on the stack, don't allocate a register.
        // Note that if this is an unused register parameter we don't want to set allocate to false because that
        // will cause us to allocate stack space to spill it.
        allocate = false;
    }
    else if ((currentInterval->physReg == REG_STK) && nextRefPosition->treeNode->OperIs(GT_BITCAST))
    {
        // In the case of ABI mismatches, avoid allocating a register only to have to immediately move
        // it to a different register file.
        allocate = false;
    }
    else if ((currentInterval->isWriteThru) && (refType == RefTypeZeroInit))
    {
        // For a RefTypeZeroInit that is a write-thru, there is no need to allocate a register
        // right away. It can be assigned when the actual definition occurs.
        // In the future, see if avoiding allocation for RefTypeZeroInit gives any benefit in general.
        allocate = false;
    }

    if (!allocate)
    {
        INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, currentInterval));
        didDump = true;
        setIntervalAsSpilled(currentInterval);
        if (assignedRegister != REG_NA)
        {
            clearNextIntervalRef(assignedRegister, currentInterval->registerType);
            clearSpillCost(assignedRegister, currentInterval->registerType);
            makeRegAvailable(assignedRegister, currentInterval->registerType);
        }
    }
}
#ifdef FEATURE_SIMD
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (currentInterval->isUpperVector)
{
    // This is a save or restore of the upper half of a large vector lclVar.
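    // For a save, we don't allocate if the lclVar is not in a register, or if its upper half is
    // already saved on the stack; for a restore, we only allocate if the upper half is currently
    // partially spilled.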
    Interval* lclVarInterval = currentInterval->relatedInterval;
    assert(lclVarInterval->isLocalVar);
    if (refType == RefTypeUpperVectorSave)
    {
        if ((lclVarInterval->physReg == REG_NA) ||
            (lclVarInterval->isPartiallySpilled && (currentInterval->physReg == REG_STK)))
        {
            allocate = false;
        }
        else
        {
            lclVarInterval->isPartiallySpilled = true;
        }
    }
    else if (refType == RefTypeUpperVectorRestore)
    {
        assert(currentInterval->isUpperVector);
        if (lclVarInterval->isPartiallySpilled)
        {
            lclVarInterval->isPartiallySpilled = false;
        }
        else
        {
            allocate = false;
        }
    }
}
else if (refType == RefTypeUpperVectorSave)
{
    assert(!currentInterval->isLocalVar);
    // Note that this case looks a lot like the case below, but in this case we need to spill
    // at the previous RefPosition.
    // We may want to consider allocating two callee-save registers for this case, but it happens rarely
    // enough that it may not warrant the additional complexity.
    if (assignedRegister != REG_NA)
    {
        unassignPhysReg(getRegisterRecord(assignedRegister), currentInterval->firstRefPosition);
        INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
    }
    currentRefPosition->registerAssignment = RBM_NONE;
    continue;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#endif // FEATURE_SIMD

if (allocate == false)
{
    if (assignedRegister != REG_NA)
    {
        unassignPhysReg(getRegisterRecord(assignedRegister), currentRefPosition);
    }
    else if (!didDump)
    {
        INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
        didDump = true;
    }
    currentRefPosition->registerAssignment = RBM_NONE;
    continue;
}

if (currentInterval->isSpecialPutArg)
{
    assert(!currentInterval->isLocalVar);
    Interval* srcInterval = currentInterval->relatedInterval;
    assert(srcInterval != nullptr && srcInterval->isLocalVar);
    if (refType == RefTypeDef)
    {
        assert(srcInterval->recentRefPosition->nodeLocation == currentLocation - 1);
        RegRecord* physRegRecord = srcInterval->assignedReg;

        // For a putarg_reg to be special, its next use location has to be the same
        // as fixed reg's next kill location. Otherwise, if source lcl var's next use
        // is after the kill of fixed reg but before putarg_reg's next use, fixed reg's
        // kill would lead to spill of source but not the putarg_reg if it were treated
        // as special.
        if (srcInterval->isActive &&
            genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment &&
            currentInterval->getNextRefLocation() == nextFixedRef[srcInterval->physReg])
        {
            assert(physRegRecord->regNum == srcInterval->physReg);

            // A special putarg_reg acts as a pass-thru since both the source lcl var
            // and the putarg_reg have the same register allocated. The physical reg
            // record continues to point to the source lcl var's interval instead of
            // to putarg_reg's interval. So, if the register allocated to the source
            // lcl var were spilled and re-allocated to another tree node before its
            // use at the call node, that would spill the lcl var rather than the
            // putarg_reg, and the arg reg would get trashed, leading to bad codegen.
            // The assumption here is that the source lcl var of a special putarg_reg
            // doesn't get spilled and re-allocated prior to its use at the call node.
            // This is ensured by marking the physical reg record as busy until the
            // next kill.
setRegBusyUntilKill(srcInterval->physReg, srcInterval->registerType); } else { currentInterval->isSpecialPutArg = false; } } // If this is still a SpecialPutArg, continue; if (currentInterval->isSpecialPutArg) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval, currentRefPosition->assignedReg())); continue; } } if (assignedRegister == REG_NA && RefTypeIsUse(refType)) { currentRefPosition->reload = true; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, currentInterval, assignedRegister)); } regMaskTP assignedRegBit = RBM_NONE; bool isInRegister = false; if (assignedRegister != REG_NA) { isInRegister = true; assignedRegBit = genRegMask(assignedRegister); if (!currentInterval->isActive) { // If this is a use, it must have started the block on the stack, but the register // was available for use so we kept the association. if (RefTypeIsUse(refType)) { assert(enregisterLocalVars); assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK && previousRefPosition->nodeLocation <= curBBStartLocation); isInRegister = false; } else { currentInterval->isActive = true; setRegInUse(assignedRegister, currentInterval->registerType); updateSpillCost(assignedRegister, currentInterval); } updateNextIntervalRef(assignedRegister, currentInterval); } assert(currentInterval->assignedReg != nullptr && currentInterval->assignedReg->regNum == assignedRegister && currentInterval->assignedReg->assignedInterval == currentInterval); } if (previousRefPosition != nullptr) { assert(previousRefPosition->nextRefPosition == currentRefPosition); assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment || currentRefPosition->outOfOrder || previousRefPosition->copyReg || previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); } else if (assignedRegister != REG_NA) { // Handle the case where this is a preassigned register (i.e. parameter). // We don't want to actually use the preassigned register if it's not // going to cover the lifetime - but we had to preallocate it to ensure // that it remained live. // TODO-CQ: At some point we may want to refine the analysis here, in case // it might be beneficial to keep it in this reg for PART of the lifetime if (currentInterval->isLocalVar) { regMaskTP preferences = currentInterval->registerPreferences; bool keepAssignment = true; bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE; // Will the assigned register cover the lifetime? If not, does it at least // meet the preferences for the next RefPosition? LsraLocation nextPhysRegLocation = nextFixedRef[assignedRegister]; if (nextPhysRegLocation <= currentInterval->lastRefPosition->nodeLocation) { // Check to see if the existing assignment matches the preferences (e.g. callee save registers) // and ensure that the next use of this localVar does not occur after the nextPhysRegRefPos // There must be a next RefPosition, because we know that the Interval extends beyond the // nextPhysRegRefPos. assert(nextRefPosition != nullptr); if (!matchesPreferences || nextPhysRegLocation < nextRefPosition->nodeLocation) { keepAssignment = false; } else if ((nextRefPosition->registerAssignment != assignedRegBit) && (nextPhysRegLocation <= nextRefPosition->getRefEndLocation())) { keepAssignment = false; } } else if (refType == RefTypeParamDef && !matchesPreferences) { // Don't use the register, even if available, if it doesn't match the preferences. 
// Note that this case is only for ParamDefs, for which we haven't yet taken preferences // into account (we've just automatically got the initial location). In other cases, // we would already have put it in a preferenced register, if it was available. // TODO-CQ: Consider expanding this to check availability - that would duplicate // code here, but otherwise we may wind up in this register anyway. keepAssignment = false; } if (keepAssignment == false) { RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg); currentRefPosition->registerAssignment = allRegs(currentInterval->registerType); currentRefPosition->isFixedRegRef = false; unassignPhysRegNoSpill(physRegRecord); // If the preferences are currently set to just this register, reset them to allRegs // of the appropriate type (just as we just reset the registerAssignment for this // RefPosition. // Otherwise, simply remove this register from the preferences, if it's there. if (currentInterval->registerPreferences == assignedRegBit) { currentInterval->registerPreferences = currentRefPosition->registerAssignment; } else { currentInterval->registerPreferences &= ~assignedRegBit; } assignedRegister = REG_NA; assignedRegBit = RBM_NONE; } } } if (assignedRegister != REG_NA) { RegRecord* physRegRecord = getRegisterRecord(assignedRegister); assert((assignedRegBit == currentRefPosition->registerAssignment) || (physRegRecord->assignedInterval == currentInterval) || !isRegInUse(assignedRegister, currentInterval->registerType)); if (conflictingFixedRegReference(assignedRegister, currentRefPosition)) { // We may have already reassigned the register to the conflicting reference. // If not, we need to unassign this interval. if (physRegRecord->assignedInterval == currentInterval) { unassignPhysRegNoSpill(physRegRecord); physRegRecord->assignedInterval = nullptr; clearConstantReg(assignedRegister, currentInterval->registerType); } currentRefPosition->moveReg = true; assignedRegister = REG_NA; currentRefPosition->registerAssignment &= ~assignedRegBit; setIntervalAsSplit(currentInterval); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister)); } else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0) { currentRefPosition->registerAssignment = assignedRegBit; if (!currentInterval->isActive) { // If we've got an exposed use at the top of a block, the // interval might not have been active. Otherwise if it's a use, // the interval must be active. if (refType == RefTypeDummyDef) { currentInterval->isActive = true; assert(getRegisterRecord(assignedRegister)->assignedInterval == currentInterval); } else { currentRefPosition->reload = true; } } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister)); } else { // It's already in a register, but not one we need. 
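// For a use, we copy the value to the required register (a copyReg), leaving the interval
// where it is; for a def, we simply free the old register and allocate a new one below.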
if (!RefTypeIsDef(currentRefPosition->refType)) { regNumber copyReg = assignCopyReg(currentRefPosition); lastAllocatedRefPosition = currentRefPosition; bool unassign = false; if (currentInterval->isWriteThru) { if (currentRefPosition->refType == RefTypeDef) { currentRefPosition->writeThru = true; } if (!currentRefPosition->lastUse) { if (currentRefPosition->spillAfter) { unassign = true; } } } regMaskTP copyRegMask = getRegMask(copyReg, currentInterval->registerType); regMaskTP assignedRegMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= copyRegMask | assignedRegMask; if (currentRefPosition->lastUse) { if (currentRefPosition->delayRegFree) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval, assignedRegister)); delayRegsToFree |= copyRegMask | assignedRegMask; regsInUseNextLocation |= copyRegMask | assignedRegMask; } else { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister)); regsToFree |= copyRegMask | assignedRegMask; } } else { copyRegsToFree |= copyRegMask; if (currentRefPosition->delayRegFree) { regsInUseNextLocation |= copyRegMask | assignedRegMask; } } // If this is a tree temp (non-localVar) interval, we will need an explicit move. // Note: In theory a moveReg should cause the Interval to now have the new reg as its // assigned register. However, that's not currently how this works. // If we ever actually move lclVar intervals instead of copying, this will need to change. if (!currentInterval->isLocalVar) { currentRefPosition->moveReg = true; currentRefPosition->copyReg = false; } clearNextIntervalRef(copyReg, currentInterval->registerType); clearSpillCost(copyReg, currentInterval->registerType); updateNextIntervalRef(assignedRegister, currentInterval); updateSpillCost(assignedRegister, currentInterval); continue; } else { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NEEDS_NEW_REG, nullptr, assignedRegister)); regsToFree |= getRegMask(assignedRegister, currentInterval->registerType); // We want a new register, but we don't want this to be considered a spill. assignedRegister = REG_NA; if (physRegRecord->assignedInterval == currentInterval) { unassignPhysRegNoSpill(physRegRecord); } } } } if (assignedRegister == REG_NA) { if (currentRefPosition->RegOptional()) { // We can avoid allocating a register if it is a last use requiring a reload. if (currentRefPosition->lastUse && currentRefPosition->reload) { allocate = false; } else if (currentInterval->isWriteThru) { // Don't allocate if the next reference is in a cold block. if (nextRefPosition == nullptr || (nextRefPosition->nodeLocation >= firstColdLoc)) { allocate = false; } } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH) // We can also avoid allocating a register (in fact we don't want to) if we have // an UpperVectorRestore on xarch where the value is on the stack. if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA)) { assert(currentRefPosition->regOptional); allocate = false; } #endif #ifdef DEBUG // Under stress mode, don't allocate registers to RegOptional RefPositions. if (allocate && regOptionalNoAlloc()) { allocate = false; } #endif } RegisterScore registerScore = NONE; if (allocate) { // Allocate a register, if we must, or if it is profitable to do so. // If we have a fixed reg requirement, and the interval is inactive in another register, // unassign that register. 
if (currentRefPosition->isFixedRegRef && !currentInterval->isActive && (currentInterval->assignedReg != nullptr) && (currentInterval->assignedReg->assignedInterval == currentInterval) && (genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition->registerAssignment)) { unassignPhysReg(currentInterval->assignedReg, nullptr); } assignedRegister = allocateReg(currentInterval, currentRefPosition DEBUG_ARG(&registerScore)); } // If no register was found, this RefPosition must not require a register. if (assignedRegister == REG_NA) { assert(currentRefPosition->RegOptional()); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval)); currentRefPosition->registerAssignment = RBM_NONE; currentRefPosition->reload = false; currentInterval->isActive = false; setIntervalAsSpilled(currentInterval); } #ifdef DEBUG else { if (VERBOSE) { if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) && currentRefPosition->treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, currentInterval, assignedRegister, currentBlock, registerScore); } else { dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, currentInterval, assignedRegister, currentBlock, registerScore); } } } #endif // DEBUG if (refType == RefTypeDummyDef && assignedRegister != REG_NA) { setInVarRegForBB(curBBNum, currentInterval->varNum, assignedRegister); } // If we allocated a register, and this is a use of a spilled value, // it should have been marked for reload above. if (assignedRegister != REG_NA && RefTypeIsUse(refType) && !isInRegister) { assert(currentRefPosition->reload); } } // If we allocated a register, record it if (assignedRegister != REG_NA) { assignedRegBit = genRegMask(assignedRegister); regMaskTP regMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= regMask; if (currentRefPosition->delayRegFree) { regsInUseNextLocation |= regMask; } currentRefPosition->registerAssignment = assignedRegBit; currentInterval->physReg = assignedRegister; regsToFree &= ~regMask; // we'll set it again later if it's dead // If this interval is dead, free the register. // The interval could be dead if this is a user variable, or if the // node is being evaluated for side effects, or a call whose result // is not used, etc. // If this is an UpperVector we'll neither free it nor preference it // (it will be freed when it is used). bool unassign = false; if (!currentInterval->IsUpperVector()) { if (currentInterval->isWriteThru) { if (currentRefPosition->refType == RefTypeDef) { currentRefPosition->writeThru = true; } if (!currentRefPosition->lastUse) { if (currentRefPosition->spillAfter) { unassign = true; } } } if (currentRefPosition->lastUse || currentRefPosition->nextRefPosition == nullptr) { assert(currentRefPosition->isIntervalRef()); // If this isn't a final use, we'll mark the register as available, but keep the association. if ((refType != RefTypeExpUse) && (currentRefPosition->nextRefPosition == nullptr)) { unassign = true; } else { if (currentRefPosition->delayRegFree) { delayRegsToMakeInactive |= regMask; } else { regsToMakeInactive |= regMask; } // TODO-Cleanup: this makes things consistent with previous, and will enable preferences // to be propagated, but it seems less than ideal. currentInterval->isActive = false; } // Update the register preferences for the relatedInterval, if this is 'preferencedToDef'. 
// Don't propagate to subsequent relatedIntervals; that will happen as they are allocated, and we // don't know yet whether the register will be retained. if (currentInterval->relatedInterval != nullptr) { currentInterval->relatedInterval->updateRegisterPreferences(assignedRegBit); } } if (unassign) { if (currentRefPosition->delayRegFree) { delayRegsToFree |= regMask; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED)); } else { regsToFree |= regMask; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE)); } } } if (!unassign) { updateNextIntervalRef(assignedRegister, currentInterval); updateSpillCost(assignedRegister, currentInterval); } } lastAllocatedRefPosition = currentRefPosition; } #ifdef JIT32_GCENCODER // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer // in the same register for the entire method, or keep it on the stack. Rather than imposing this constraint // as we allocate, we will force all refs to the stack if it is split or spilled. if (enregisterLocalVars && compiler->lvaKeepAliveAndReportThis()) { LclVarDsc* thisVarDsc = compiler->lvaGetDesc(compiler->info.compThisArg); if (thisVarDsc->lvLRACandidate) { Interval* interval = getIntervalForLocalVar(thisVarDsc->lvVarIndex); if (interval->isSplit) { // We'll have to spill this. setIntervalAsSpilled(interval); } if (interval->isSpilled) { unsigned prevBBNum = 0; for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition) { // For the resolution phase, we need to ensure that any block with exposed uses has the // incoming reg for 'this' as REG_STK. if (RefTypeIsUse(ref->refType) && (ref->bbNum != prevBBNum)) { VarToRegMap inVarToRegMap = getInVarToRegMap(ref->bbNum); setVarReg(inVarToRegMap, thisVarDsc->lvVarIndex, REG_STK); } if (ref->RegOptional()) { ref->registerAssignment = RBM_NONE; ref->reload = false; ref->spillAfter = false; } switch (ref->refType) { case RefTypeDef: if (ref->registerAssignment != RBM_NONE) { ref->spillAfter = true; } break; case RefTypeUse: if (ref->registerAssignment != RBM_NONE) { ref->reload = true; ref->spillAfter = true; ref->copyReg = false; ref->moveReg = false; } break; default: break; } prevBBNum = ref->bbNum; } } } } #endif // JIT32_GCENCODER // Free registers to clear associated intervals for resolution phase CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (getLsraExtendLifeTimes()) { // If we have extended lifetimes, we need to make sure all the registers are freed. for (size_t regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++) { RegRecord& regRecord = physRegs[regNumIndex]; Interval* interval = regRecord.assignedInterval; if (interval != nullptr) { interval->isActive = false; unassignPhysReg(&regRecord, nullptr); } } } else #endif // DEBUG { freeRegisters(regsToFree | delayRegsToFree); } #ifdef DEBUG if (VERBOSE) { // Dump the RegRecords after the last RefPosition is handled. dumpRegRecords(); printf("\n"); dumpRefPositions("AFTER ALLOCATION"); dumpVarRefPositions("AFTER ALLOCATION"); // Dump the intervals that remain active printf("Active intervals at end of allocation:\n"); // We COULD just reuse the intervalIter from above, but ArrayListIterator doesn't // provide a Reset function (!) 
- we'll probably replace this so don't bother
// adding it
for (Interval& interval : intervals)
{
    if (interval.isActive)
    {
        printf("Active ");
        interval.dump();
    }
}

printf("\n");
}
#endif // DEBUG
}

//-----------------------------------------------------------------------------
// updateAssignedInterval: Update assigned interval of register.
//
// Arguments:
//    reg      - register to be updated
//    interval - interval to be assigned
//    regType  - register type
//
// Return Value:
//    None
//
// Note:
//    For ARM32, the two float registers that make up a double register are updated
//    together when "regType" is TYP_DOUBLE.
//
void LinearScan::updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
#ifdef TARGET_ARM
    // Update overlapping floating point register for TYP_DOUBLE.
    Interval* oldAssignedInterval = reg->assignedInterval;
    regNumber doubleReg           = REG_NA;
    if (regType == TYP_DOUBLE)
    {
        RegRecord* anotherHalfReg        = findAnotherHalfRegRec(reg);
        doubleReg                        = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
        anotherHalfReg->assignedInterval = interval;
    }
    else if ((oldAssignedInterval != nullptr) && (oldAssignedInterval->registerType == TYP_DOUBLE))
    {
        RegRecord* anotherHalfReg        = findAnotherHalfRegRec(reg);
        doubleReg                        = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
        anotherHalfReg->assignedInterval = nullptr;
    }
    if (doubleReg != REG_NA)
    {
        clearNextIntervalRef(doubleReg, TYP_DOUBLE);
        clearSpillCost(doubleReg, TYP_DOUBLE);
        clearConstantReg(doubleReg, TYP_DOUBLE);
    }
#endif
    reg->assignedInterval = interval;

    if (interval != nullptr)
    {
        setRegInUse(reg->regNum, interval->registerType);
        if (interval->isConstant)
        {
            setConstantReg(reg->regNum, interval->registerType);
        }
        else
        {
            clearConstantReg(reg->regNum, interval->registerType);
        }
        updateNextIntervalRef(reg->regNum, interval);
        updateSpillCost(reg->regNum, interval);
    }
    else
    {
        clearNextIntervalRef(reg->regNum, reg->registerType);
        clearSpillCost(reg->regNum, reg->registerType);
    }
}

//-----------------------------------------------------------------------------
// updatePreviousInterval: Update previous interval of register.
//
// Arguments:
//    reg      - register to be updated
//    interval - interval to be assigned
//    regType  - register type
//
// Return Value:
//    None
//
// Assumptions:
//    For ARM32, when "regType" is TYP_DOUBLE, "reg" should be an even-numbered
//    float register, i.e. the lower half of the double register.
//
// Note:
//    For ARM32, the two float registers that make up a double register are updated
//    together when "regType" is TYP_DOUBLE.
//
void LinearScan::updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
    reg->previousInterval = interval;

#ifdef TARGET_ARM
    // Update overlapping floating point register for TYP_DOUBLE
    if (regType == TYP_DOUBLE)
    {
        RegRecord* anotherHalfReg        = findAnotherHalfRegRec(reg);
        anotherHalfReg->previousInterval = interval;
    }
#endif
}

//-----------------------------------------------------------------------------
// writeLocalReg: Write the register assignment for a GT_LCL_VAR node.
//
// Arguments:
//    lclNode  - The GT_LCL_VAR node
//    varNum   - The variable number for the register
//    reg      - The assigned register
//
// Return Value:
//    None
//
// Note:
//    For a multireg node, 'varNum' will be the field local for the given register.
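//    Example (hypothetical locals): for a promoted multi-reg V01 whose field locals start at V02,
//    writing varNum V03 sets the register at index 1 (V03 - V02) on the node.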
// void LinearScan::writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg) { assert((lclNode->GetLclNum() == varNum) == !lclNode->IsMultiReg()); if (lclNode->GetLclNum() == varNum) { lclNode->SetRegNum(reg); } else { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode); assert(parentVarDsc->lvPromoted); unsigned regIndex = varNum - parentVarDsc->lvFieldLclStart; assert(regIndex < MAX_MULTIREG_COUNT); lclNode->SetRegNumByIdx(reg, regIndex); } } //----------------------------------------------------------------------------- // LinearScan::resolveLocalRef // Description: // Update the graph for a local reference. // Also, track the register (if any) that is currently occupied. // Arguments: // treeNode: The lclVar that's being resolved // currentRefPosition: the RefPosition associated with the treeNode // // Details: // This method is called for each local reference, during the resolveRegisters // phase of LSRA. It is responsible for keeping the following in sync: // - varDsc->GetRegNum() (and GetOtherReg()) contain the unique register location. // If it is not in the same register through its lifetime, it is set to REG_STK. // - interval->physReg is set to the assigned register // (i.e. at the code location which is currently being handled by resolveRegisters()) // - interval->isActive is true iff the interval is live and occupying a register // - interval->isSpilled should have already been set to true if the interval is EVER spilled // - interval->isSplit is set to true if the interval does not occupy the same // register throughout the method // - RegRecord->assignedInterval points to the interval which currently occupies // the register // - For each lclVar node: // - GetRegNum()/gtRegPair is set to the currently allocated register(s). // - GTF_SPILLED is set on a use if it must be reloaded prior to use. // - GTF_SPILL is set if it must be spilled after use. // // A copyReg is an ugly case where the variable must be in a specific (fixed) register, // but it currently resides elsewhere. The register allocator must track the use of the // fixed register, but it marks the lclVar node with the register it currently lives in // and the code generator does the necessary move. // // Before beginning, the varDsc for each parameter must be set to its initial location. // // NICE: Consider tracking whether an Interval is always in the same location (register/stack) // in which case it will require no resolution. // void LinearScan::resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition) { assert((block == nullptr) == (treeNode == nullptr)); assert(enregisterLocalVars); // Is this a tracked local? Or just a register allocated for loading // a non-tracked one? Interval* interval = currentRefPosition->getInterval(); assert(interval->isLocalVar); interval->recentRefPosition = currentRefPosition; LclVarDsc* varDsc = interval->getLocalVar(compiler); // NOTE: we set the LastUse flag here unless we are extending lifetimes, in which case we write // this bit in checkLastUses. This is a bit of a hack, but is necessary because codegen requires // accurate last use info that is not reflected in the lastUse bit on ref positions when we are extending // lifetimes. See also the comments in checkLastUses. 
if ((treeNode != nullptr) && !extendLifetimes())
{
    if (currentRefPosition->lastUse)
    {
        treeNode->SetLastUse(currentRefPosition->getMultiRegIdx());
    }
    else
    {
        treeNode->ClearLastUse(currentRefPosition->getMultiRegIdx());
    }

    if ((currentRefPosition->registerAssignment != RBM_NONE) && (interval->physReg == REG_NA) &&
        currentRefPosition->RegOptional() && currentRefPosition->lastUse &&
        (currentRefPosition->refType == RefTypeUse))
    {
        // This can happen if the incoming location for the block was changed from a register to the stack
        // during resolution. In this case we're better off making it contained.
        assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
        currentRefPosition->registerAssignment = RBM_NONE;
        writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
    }
}

if (currentRefPosition->registerAssignment == RBM_NONE)
{
    assert(currentRefPosition->RegOptional());
    assert(interval->isSpilled);

    varDsc->SetRegNum(REG_STK);
    if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
    {
        updateAssignedInterval(interval->assignedReg, nullptr, interval->registerType);
    }
    interval->assignedReg = nullptr;
    interval->physReg     = REG_NA;
    interval->isActive    = false;

    // Set this as contained if it is not a multi-reg (we could potentially mark it as contained
    // if all uses are from spill, but that adds complexity).
    if ((currentRefPosition->refType == RefTypeUse) && !treeNode->IsMultiReg())
    {
        assert(treeNode != nullptr);
        treeNode->SetContained();
    }

    return;
}

// In most cases, assigned and home registers will be the same
// The exception is the copyReg case, where we've assigned a register
// for a specific purpose, but will be keeping the register assignment
regNumber assignedReg = currentRefPosition->assignedReg();
regNumber homeReg     = assignedReg;

// Undo any previous association with a physical register, UNLESS this
// is a copyReg
if (!currentRefPosition->copyReg)
{
    regNumber oldAssignedReg = interval->physReg;
    if (oldAssignedReg != REG_NA && assignedReg != oldAssignedReg)
    {
        RegRecord* oldRegRecord = getRegisterRecord(oldAssignedReg);
        if (oldRegRecord->assignedInterval == interval)
        {
            updateAssignedInterval(oldRegRecord, nullptr, interval->registerType);
        }
    }
}

if (currentRefPosition->refType == RefTypeUse && !currentRefPosition->reload)
{
    // Was this spilled after our predecessor was scheduled?
    if (interval->physReg == REG_NA)
    {
        assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
        currentRefPosition->reload = true;
    }
}

bool reload     = currentRefPosition->reload;
bool spillAfter = currentRefPosition->spillAfter;
bool writeThru  = currentRefPosition->writeThru;

// In the reload case we either:
// - Set the register to REG_STK if it will be referenced only from the home location, or
// - Set the register to the assigned register and set GTF_SPILLED if it must be loaded into a register.
if (reload)
{
    assert(currentRefPosition->refType != RefTypeDef);
    assert(interval->isSpilled);
    varDsc->SetRegNum(REG_STK);
    if (!spillAfter)
    {
        interval->physReg = assignedReg;
    }

    // If there is no treeNode, this must be a RefTypeExpUse, in
    // which case we did the reload already
    if (treeNode != nullptr)
    {
        treeNode->gtFlags |= GTF_SPILLED;
        if (treeNode->IsMultiReg())
        {
            treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
        }
        if (spillAfter)
        {
            if (currentRefPosition->RegOptional())
            {
                // This is a use of lclVar that is flagged as reg-optional
                // by lower/codegen and marked for both reload and spillAfter.
// In this case we can avoid unnecessary reload and spill // by setting reg on lclVar to REG_STK and reg on tree node // to REG_NA. Codegen will generate the code by considering // it as a contained memory operand. // // Note that varDsc->GetRegNum() is already to REG_STK above. interval->physReg = REG_NA; writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA); treeNode->gtFlags &= ~GTF_SPILLED; treeNode->SetContained(); // We don't support RegOptional for multi-reg localvars. assert(!treeNode->IsMultiReg()); } else { treeNode->gtFlags |= GTF_SPILL; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } } } } else { assert(currentRefPosition->refType == RefTypeExpUse); } } else if (spillAfter && !RefTypeIsUse(currentRefPosition->refType) && (treeNode != nullptr) && (!treeNode->IsMultiReg() || treeNode->gtGetOp1()->IsMultiRegNode())) { // In the case of a pure def, don't bother spilling - just assign it to the // stack. However, we need to remember that it was spilled. // We can't do this in the case of a multi-reg node with a non-multireg source as // we need the register to extract into. assert(interval->isSpilled); varDsc->SetRegNum(REG_STK); interval->physReg = REG_NA; writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA); } else // Not reload and Not pure-def that's spillAfter { if (currentRefPosition->copyReg || currentRefPosition->moveReg) { // For a copyReg or moveReg, we have two cases: // - In the first case, we have a fixedReg - i.e. a register which the code // generator is constrained to use. // The code generator will generate the appropriate move to meet the requirement. // - In the second case, we were forced to use a different register because of // interference (or JitStressRegs). // In this case, we generate a GT_COPY. // In either case, we annotate the treeNode with the register in which the value // currently lives. For moveReg, the homeReg is the new register (as assigned above). // But for copyReg, the homeReg remains unchanged. assert(treeNode != nullptr); writeLocalReg(treeNode->AsLclVar(), interval->varNum, interval->physReg); if (currentRefPosition->copyReg) { homeReg = interval->physReg; } else { assert(interval->isSplit); interval->physReg = assignedReg; } if (!currentRefPosition->isFixedRegRef || currentRefPosition->moveReg) { // This is the second case, where we need to generate a copy insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition); } } else { interval->physReg = assignedReg; if (!interval->isSpilled && !interval->isSplit) { if (varDsc->GetRegNum() != REG_STK) { // If the register assignments don't match, then this interval is split. if (varDsc->GetRegNum() != assignedReg) { setIntervalAsSplit(interval); varDsc->SetRegNum(REG_STK); } } else { varDsc->SetRegNum(assignedReg); } } } if (spillAfter) { if (treeNode != nullptr) { treeNode->gtFlags |= GTF_SPILL; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } } assert(interval->isSpilled); interval->physReg = REG_NA; varDsc->SetRegNum(REG_STK); } if (writeThru && (treeNode != nullptr)) { // This is a def of a write-thru EH var (only defs are marked 'writeThru'). treeNode->gtFlags |= GTF_SPILL; // We also mark writeThru defs that are not last-use with GTF_SPILLED to indicate that they are conceptually // spilled and immediately "reloaded", i.e. the register remains live. 
// Note that we can have a "last use" write that has no exposed uses in the standard // (non-eh) control flow, but that may be used on an exception path. Hence the need // to retain these defs, and to ensure that they write. if (!currentRefPosition->lastUse) { treeNode->gtFlags |= GTF_SPILLED; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx()); } } } if (currentRefPosition->singleDefSpill && (treeNode != nullptr)) { // This is the first (and only) def of a single-def var (only defs are marked 'singleDefSpill'). // Mark it as GTF_SPILL, so it is spilled immediately to the stack at definition and // GTF_SPILLED, so the variable stays live in the register. // // TODO: This approach would still create the resolution moves but during codegen, will check for // `lvSpillAtSingleDef` to decide whether to generate spill or not. In future, see if there is some // better way to avoid resolution moves, perhaps by updating the varDsc->SetRegNum(REG_STK) in this // method? treeNode->gtFlags |= GTF_SPILL; treeNode->gtFlags |= GTF_SPILLED; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx()); } varDsc->lvSpillAtSingleDef = true; } } // Update the physRegRecord for the register, so that we know what vars are in // regs at the block boundaries RegRecord* physRegRecord = getRegisterRecord(homeReg); if (spillAfter || currentRefPosition->lastUse) { interval->isActive = false; interval->assignedReg = nullptr; interval->physReg = REG_NA; updateAssignedInterval(physRegRecord, nullptr, interval->registerType); } else { interval->isActive = true; interval->assignedReg = physRegRecord; updateAssignedInterval(physRegRecord, interval, interval->registerType); } } void LinearScan::writeRegisters(RefPosition* currentRefPosition, GenTree* tree) { lsraAssignRegToTree(tree, currentRefPosition->assignedReg(), currentRefPosition->getMultiRegIdx()); } //------------------------------------------------------------------------ // insertCopyOrReload: Insert a copy in the case where a tree node value must be moved // to a different register at the point of use (GT_COPY), or it is reloaded to a different register // than the one it was spilled from (GT_RELOAD). // // Arguments: // block - basic block in which GT_COPY/GT_RELOAD is inserted. // tree - This is the node to copy or reload. // Insert copy or reload node between this node and its parent. // multiRegIdx - register position of tree node for which copy or reload is needed. // refPosition - The RefPosition at which copy or reload will take place. // // Notes: // The GT_COPY or GT_RELOAD will be inserted in the proper spot in execution order where the reload is to occur. 
// // For example, for this tree (numbers are execution order, lower is earlier and higher is later): // // +---------+----------+ // | GT_ADD (3) | // +---------+----------+ // | // / '\' // / '\' // / '\' // +-------------------+ +----------------------+ // | x (1) | "tree" | y (2) | // +-------------------+ +----------------------+ // // generate this tree: // // +---------+----------+ // | GT_ADD (4) | // +---------+----------+ // | // / '\' // / '\' // / '\' // +-------------------+ +----------------------+ // | GT_RELOAD (3) | | y (2) | // +-------------------+ +----------------------+ // | // +-------------------+ // | x (1) | "tree" // +-------------------+ // // Note in particular that the GT_RELOAD node gets inserted in execution order immediately before the parent of "tree", // which seems a bit weird since normally a node's parent (in this case, the parent of "x", GT_RELOAD in the "after" // picture) immediately follows all of its children (that is, normally the execution ordering is postorder). // The ordering must be this weird "out of normal order" way because the "x" node is being spilled, probably // because the expression in the tree represented above by "y" has high register requirements. We don't want // to reload immediately, of course. So we put GT_RELOAD where the reload should actually happen. // // Note that GT_RELOAD is required when we reload to a different register than the one we spilled to. It can also be // used if we reload to the same register. Normally, though, in that case we just mark the node with GTF_SPILLED, // and the unspilling code automatically reuses the same register, and does the reload when it notices that flag // when considering a node's operands. // void LinearScan::insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition) { LIR::Range& blockRange = LIR::AsRange(block); LIR::Use treeUse; bool foundUse = blockRange.TryGetUse(tree, &treeUse); assert(foundUse); GenTree* parent = treeUse.User(); genTreeOps oper; if (refPosition->reload) { oper = GT_RELOAD; } else { oper = GT_COPY; INTRACK_STATS(updateLsraStat(STAT_COPY_REG, block->bbNum)); } // If the parent is a reload/copy node, then tree must be a multi-reg node // that has already had one of its registers spilled. // It is possible that one of its RefTypeDef positions got spilled and the next // use of it requires it to be in a different register. // // In this case set the i'th position reg of reload/copy node to the reg allocated // for copy/reload refPosition. Essentially a copy/reload node will have a reg // for each multi-reg position of its child. If there is a valid reg in i'th // position of GT_COPY or GT_RELOAD node then the corresponding result of its // child needs to be copied or reloaded to that reg. if (parent->IsCopyOrReload()) { noway_assert(parent->OperGet() == oper); noway_assert(tree->IsMultiRegNode()); GenTreeCopyOrReload* copyOrReload = parent->AsCopyOrReload(); noway_assert(copyOrReload->GetRegNumByIdx(multiRegIdx) == REG_NA); copyOrReload->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx); } else { var_types regType = tree->TypeGet(); if ((regType == TYP_STRUCT) && !tree->IsMultiRegNode()) { assert(compiler->compEnregStructLocals()); assert(tree->IsLocal()); const GenTreeLclVarCommon* lcl = tree->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); // We create struct copies with a primitive type so we don't bother copy node with parsing structHndl. 
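// (For example, an 8-byte struct local enregistered in an integer register yields TYP_LONG here.)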
        // Note that for a multiReg node we keep each regType in the tree and don't need this.
        regType = varDsc->GetRegisterType(lcl);
        assert(regType != TYP_UNDEF);
    }

    // Create the new node, with "tree" as its only child.
    GenTreeCopyOrReload* newNode = new (compiler, oper) GenTreeCopyOrReload(oper, regType, tree);
    assert(refPosition->registerAssignment != RBM_NONE);
    SetLsraAdded(newNode);
    newNode->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
    if (refPosition->copyReg)
    {
        // This is a TEMPORARY copy
        assert(isCandidateLocalRef(tree) || tree->IsMultiRegLclVar());
        newNode->SetLastUse(multiRegIdx);
    }

    // Insert the copy/reload after the spilled node and replace the use of the original node with a use
    // of the copy/reload.
    blockRange.InsertAfter(tree, newNode);
    treeUse.ReplaceWith(newNode);
}
}

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// insertUpperVectorSave: Insert code to save the upper half of a vector that lives
//                        in a callee-save register at the point of a kill (the upper half is
//                        not preserved).
//
// Arguments:
//    tree                - This is the node before which we will insert the Save.
//                          It will be a call or some node that turns into a call.
//    refPosition         - The RefTypeUpperVectorSave RefPosition.
//    upperVectorInterval - The Interval for the upper half of the large vector lclVar.
//    block               - the BasicBlock containing the call.
//
void LinearScan::insertUpperVectorSave(GenTree*     tree,
                                       RefPosition* refPosition,
                                       Interval*    upperVectorInterval,
                                       BasicBlock*  block)
{
    JITDUMP("Inserting UpperVectorSave for RP #%d before %d.%s:\n", refPosition->rpNum, tree->gtTreeID,
            GenTree::OpName(tree->gtOper));
    Interval* lclVarInterval = upperVectorInterval->relatedInterval;
    assert(lclVarInterval->isLocalVar == true);
    assert(refPosition->getInterval() == upperVectorInterval);
    regNumber lclVarReg = lclVarInterval->physReg;
    if (lclVarReg == REG_NA)
    {
        return;
    }

#ifdef DEBUG
    if (tree->IsCall())
    {
        // Make sure that we do not insert a vector save before calls that do not return.
        assert(!tree->AsCall()->IsNoReturn());
    }
#endif

    LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum);
    assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()));

    // On Arm64, we must always have a register to save the upper half,
    // while on x86 we can spill directly to memory.
    regNumber spillReg = refPosition->assignedReg();
#ifdef TARGET_ARM64
    bool spillToMem = refPosition->spillAfter;
    assert(spillReg != REG_NA);
#else
    bool spillToMem = (spillReg == REG_NA);
    assert(!refPosition->spillAfter);
#endif

    LIR::Range& blockRange = LIR::AsRange(block);

    // Insert the save before the call.

    GenTree* saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType);
    saveLcl->SetRegNum(lclVarReg);
    SetLsraAdded(saveLcl);

    GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(LargeVectorSaveType, saveLcl, SIMDIntrinsicUpperSave,
                                                    varDsc->GetSimdBaseJitType(), genTypeSize(varDsc));

    if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
    {
        // There are a few scenarios where we can get a LCL_VAR which
        // doesn't know the underlying baseType. In that scenario, we
        // will just lie and say it is a float. Codegen doesn't actually
        // care what the type is but this avoids an assert that would
        // otherwise be fired from the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); } SetLsraAdded(simdNode); simdNode->SetRegNum(spillReg); if (spillToMem) { simdNode->gtFlags |= GTF_SPILL; upperVectorInterval->physReg = REG_NA; } else { assert((genRegMask(spillReg) & RBM_FLT_CALLEE_SAVED) != RBM_NONE); upperVectorInterval->physReg = spillReg; } blockRange.InsertBefore(tree, LIR::SeqTree(compiler, simdNode)); DISPTREE(simdNode); JITDUMP("\n"); } //------------------------------------------------------------------------ // insertUpperVectorRestore: Insert code to restore the upper half of a vector that has been partially spilled. // // Arguments: // tree - This is the node for which we will insert the Restore. // If non-null, it will be a use of the large vector lclVar. // If null, the Restore will be added to the end of the block. // upperVectorInterval - The Interval for the upper vector for the lclVar. // block - the BasicBlock into which we will be inserting the code. // // Notes: // In the case where 'tree' is non-null, we will insert the restore just prior to // its use, in order to ensure the proper ordering. // void LinearScan::insertUpperVectorRestore(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block) { JITDUMP("Adding UpperVectorRestore for RP #%d ", refPosition->rpNum); Interval* lclVarInterval = upperVectorInterval->relatedInterval; assert(lclVarInterval->isLocalVar == true); regNumber lclVarReg = lclVarInterval->physReg; // We should not call this method if the lclVar is not in a register (we should have simply marked the entire // lclVar as spilled). assert(lclVarReg != REG_NA); LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum); assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())); GenTree* restoreLcl = nullptr; restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType); restoreLcl->SetRegNum(lclVarReg); SetLsraAdded(restoreLcl); GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(varDsc->TypeGet(), restoreLcl, SIMDIntrinsicUpperRestore, varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType)); if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF) { // There are a few scenarios where we can get a LCL_VAR which // doesn't know the underlying baseType. In that scenario, we // will just lie and say it is a float. Codegen doesn't actually // care what the type is but this avoids an assert that would // otherwise be fired from the more general checks that happen. simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); } regNumber restoreReg = upperVectorInterval->physReg; SetLsraAdded(simdNode); if (restoreReg == REG_NA) { // We need a stack location for this. assert(lclVarInterval->isSpilled); #ifdef TARGET_AMD64 assert(refPosition->assignedReg() == REG_NA); simdNode->gtFlags |= GTF_NOREG_AT_USE; #else simdNode->gtFlags |= GTF_SPILLED; assert(refPosition->assignedReg() != REG_NA); restoreReg = refPosition->assignedReg(); #endif } simdNode->SetRegNum(restoreReg); LIR::Range& blockRange = LIR::AsRange(block); JITDUMP("Adding UpperVectorRestore "); if (tree != nullptr) { JITDUMP("before %d.%s:\n", tree->gtTreeID, GenTree::OpName(tree->gtOper)); LIR::Use treeUse; bool foundUse = blockRange.TryGetUse(tree, &treeUse); assert(foundUse); // We need to insert the restore prior to the use, not (necessarily) immediately after the lclVar. 
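        // For example (illustrative), if the use is ADD(V01, t2) and a call sits between the
        // V01 node and the ADD in linear order, restoring immediately after V01 would let the
        // call clobber the upper half again; restoring just before the ADD (the user) is safe.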
blockRange.InsertBefore(treeUse.User(), LIR::SeqTree(compiler, simdNode)); } else { JITDUMP("at end of " FMT_BB ":\n", block->bbNum); if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = blockRange.LastNode(); assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, LIR::SeqTree(compiler, simdNode)); } else { assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); blockRange.InsertAtEnd(LIR::SeqTree(compiler, simdNode)); } } DISPTREE(simdNode); JITDUMP("\n"); } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE //------------------------------------------------------------------------ // initMaxSpill: Initializes the LinearScan members used to track the max number // of concurrent spills. This is needed so that we can set the // fields in Compiler, so that the code generator, in turn can // allocate the right number of spill locations. // // Arguments: // None. // // Return Value: // None. // // Assumptions: // This is called before any calls to updateMaxSpill(). void LinearScan::initMaxSpill() { needDoubleTmpForFPCall = false; needFloatTmpForFPCall = false; for (int i = 0; i < TYP_COUNT; i++) { maxSpill[i] = 0; currentSpill[i] = 0; } } //------------------------------------------------------------------------ // recordMaxSpill: Sets the fields in Compiler for the max number of concurrent spills. // (See the comment on initMaxSpill.) // // Arguments: // None. // // Return Value: // None. // // Assumptions: // This is called after updateMaxSpill() has been called for all "real" // RefPositions. void LinearScan::recordMaxSpill() { // Note: due to the temp normalization process (see tmpNormalizeType) // only a few types should actually be seen here. JITDUMP("Recording the maximum number of concurrent spills:\n"); #ifdef TARGET_X86 var_types returnType = RegSet::tmpNormalizeType(compiler->info.compRetType); if (needDoubleTmpForFPCall || (returnType == TYP_DOUBLE)) { JITDUMP("Adding a spill temp for moving a double call/return value between xmm reg and x87 stack.\n"); maxSpill[TYP_DOUBLE] += 1; } if (needFloatTmpForFPCall || (returnType == TYP_FLOAT)) { JITDUMP("Adding a spill temp for moving a float call/return value between xmm reg and x87 stack.\n"); maxSpill[TYP_FLOAT] += 1; } #endif // TARGET_X86 compiler->codeGen->regSet.tmpBeginPreAllocateTemps(); for (int i = 0; i < TYP_COUNT; i++) { if (var_types(i) != RegSet::tmpNormalizeType(var_types(i))) { // Only normalized types should have anything in the maxSpill array. // We assume here that if type 'i' does not normalize to itself, then // nothing else normalizes to 'i', either. assert(maxSpill[i] == 0); } if (maxSpill[i] != 0) { JITDUMP(" %s: %d\n", varTypeName(var_types(i)), maxSpill[i]); compiler->codeGen->regSet.tmpPreAllocateTemps(var_types(i), maxSpill[i]); } } JITDUMP("\n"); } //------------------------------------------------------------------------ // updateMaxSpill: Update the maximum number of concurrent spills // // Arguments: // refPosition - the current RefPosition being handled // // Return Value: // None. // // Assumptions: // The RefPosition has an associated interval (getInterval() will // otherwise assert). // // Notes: // This is called for each "real" RefPosition during the writeback // phase of LSRA. It keeps track of how many concurrently-live // spills there are, and the largest number seen so far. 
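//    For intuition, a minimal (hypothetical) sequence for TYP_INT tree temps:
//        def t1, spillAfter -> currentSpill[TYP_INT] = 1, maxSpill[TYP_INT] = 1
//        def t2, spillAfter -> currentSpill[TYP_INT] = 2, maxSpill[TYP_INT] = 2
//        use t1, reload     -> currentSpill[TYP_INT] = 1
//        use t2, reload     -> currentSpill[TYP_INT] = 0
//    yielding two preallocated TYP_INT spill temps.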
void LinearScan::updateMaxSpill(RefPosition* refPosition) { RefType refType = refPosition->refType; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if ((refType == RefTypeUpperVectorSave) || (refType == RefTypeUpperVectorRestore)) { Interval* interval = refPosition->getInterval(); // If this is not an 'upperVector', it must be a tree temp that has been already // (fully) spilled. if (!interval->isUpperVector) { assert(interval->firstRefPosition->spillAfter); } else { // The UpperVector RefPositions spill to the localVar's home location. Interval* lclVarInterval = interval->relatedInterval; assert(lclVarInterval->isSpilled || (!refPosition->spillAfter && !refPosition->reload)); } return; } #endif // !FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (refPosition->spillAfter || refPosition->reload || (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA)) { Interval* interval = refPosition->getInterval(); if (!interval->isLocalVar) { GenTree* treeNode = refPosition->treeNode; if (treeNode == nullptr) { assert(RefTypeIsUse(refType)); treeNode = interval->firstRefPosition->treeNode; } assert(treeNode != nullptr); // The tmp allocation logic 'normalizes' types to a small number of // types that need distinct stack locations from each other. // Those types are currently gc refs, byrefs, <= 4 byte non-GC items, // 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors. // LSRA is agnostic to those choices but needs // to know what they are here. var_types type; if (!treeNode->IsMultiRegNode()) { type = getDefType(treeNode); } else { type = treeNode->GetRegTypeByIndex(refPosition->getMultiRegIdx()); } type = RegSet::tmpNormalizeType(type); if (refPosition->spillAfter && !refPosition->reload) { currentSpill[type]++; if (currentSpill[type] > maxSpill[type]) { maxSpill[type] = currentSpill[type]; } } else if (refPosition->reload) { assert(currentSpill[type] > 0); currentSpill[type]--; } else if (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA) { // A spill temp not getting reloaded into a reg because it is // marked as allocate if profitable and getting used from its // memory location. To properly account max spill for typ we // decrement spill count. assert(RefTypeIsUse(refType)); assert(currentSpill[type] > 0); currentSpill[type]--; } JITDUMP(" Max spill for %s is %d\n", varTypeName(type), maxSpill[type]); } } } // This is the final phase of register allocation. It writes the register assignments to // the tree, and performs resolution across joins and backedges. // void LinearScan::resolveRegisters() { // Iterate over the tree and the RefPositions in lockstep // - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs) // on the tree node // - track globally-live var locations // - add resolution points at split/merge/critical points as needed // Need to use the same traversal order as the one that assigns the location numbers. // Dummy RefPositions have been added at any split, join or critical edge, at the // point where resolution may be required. These are located: // - for a split, at the top of the non-adjacent block // - for a join, at the bottom of the non-adjacent joining block // - for a critical edge, at the top of the target block of each critical // edge. // Note that a target block may have multiple incoming critical or split edges // // These RefPositions record the expected location of the Interval at that point. // At each branch, we identify the location of each liveOut interval, and check // against the RefPositions at the target. 
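    // For example (illustrative): if V01 is live out of one block in a given register, but the
    // dummy RefPositions at the top of a non-adjacent successor record a different location,
    // that mismatch is what later drives a move during resolution (see resolveEdges, below).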
BasicBlock* block; LsraLocation currentLocation = MinLocation; // Clear register assignments - these will be reestablished as lclVar defs (including RefTypeParamDefs) // are encountered. if (enregisterLocalVars) { for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { assignedInterval->assignedReg = nullptr; assignedInterval->physReg = REG_NA; } physRegRecord->assignedInterval = nullptr; physRegRecord->recentRefPosition = nullptr; } // Clear "recentRefPosition" for lclVar intervals for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { if (localVarIntervals[varIndex] != nullptr) { localVarIntervals[varIndex]->recentRefPosition = nullptr; localVarIntervals[varIndex]->isActive = false; } else { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); } } } // handle incoming arguments and special temps RefPositionIterator refPosIterator = refPositions.begin(); RefPosition* currentRefPosition = &refPosIterator; if (enregisterLocalVars) { VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum]; for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = currentRefPosition->getInterval(); assert(interval != nullptr && interval->isLocalVar); resolveLocalRef(nullptr, nullptr, currentRefPosition); regNumber reg = REG_STK; int varIndex = interval->getVarIndex(compiler); if (!currentRefPosition->spillAfter && currentRefPosition->registerAssignment != RBM_NONE) { reg = currentRefPosition->assignedReg(); } else { reg = REG_STK; interval->isActive = false; } setVarReg(entryVarToRegMap, varIndex, reg); } } else { assert(refPosIterator == refPositions.end() || (refPosIterator->refType != RefTypeParamDef && refPosIterator->refType != RefTypeZeroInit)); } // write back assignments for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock()) { assert(curBBNum == block->bbNum); if (enregisterLocalVars) { // Record the var locations at the start of this block. // (If it's fgFirstBB, we've already done that above, see entryVarToRegMap) curBBStartLocation = currentRefPosition->nodeLocation; if (block != compiler->fgFirstBB) { processBlockStartLocations(block); } // Handle the DummyDefs, updating the incoming var location. for (; refPosIterator != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef; ++refPosIterator, currentRefPosition = &refPosIterator) { assert(currentRefPosition->isIntervalRef()); // Don't mark dummy defs as reload currentRefPosition->reload = false; resolveLocalRef(nullptr, nullptr, currentRefPosition); regNumber reg; if (currentRefPosition->registerAssignment != RBM_NONE) { reg = currentRefPosition->assignedReg(); } else { reg = REG_STK; currentRefPosition->getInterval()->isActive = false; } setInVarRegForBB(curBBNum, currentRefPosition->getInterval()->varNum, reg); } } // The next RefPosition should be for the block. Move past it. 
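        // (Each block's RefPositions begin with a RefTypeBB marker created when the intervals
        // were built; the asserts below verify that the block traversal and the RefPosition
        // iterator are still in lockstep.)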
assert(refPosIterator != refPositions.end()); assert(currentRefPosition->refType == RefTypeBB); ++refPosIterator; currentRefPosition = &refPosIterator; // Handle the RefPositions for the block for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB && currentRefPosition->refType != RefTypeDummyDef; ++refPosIterator, currentRefPosition = &refPosIterator) { currentLocation = currentRefPosition->nodeLocation; // Ensure that the spill & copy info is valid. // First, if it's reload, it must not be copyReg or moveReg assert(!currentRefPosition->reload || (!currentRefPosition->copyReg && !currentRefPosition->moveReg)); // If it's copyReg it must not be moveReg, and vice-versa assert(!currentRefPosition->copyReg || !currentRefPosition->moveReg); switch (currentRefPosition->refType) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUpperVectorSave: case RefTypeUpperVectorRestore: #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUse: case RefTypeDef: // These are the ones we're interested in break; case RefTypeKill: case RefTypeFixedReg: // These require no handling at resolution time assert(currentRefPosition->referent != nullptr); currentRefPosition->referent->recentRefPosition = currentRefPosition; continue; case RefTypeExpUse: // Ignore the ExpUse cases - a RefTypeExpUse would only exist if the // variable is dead at the entry to the next block. So we'll mark // it as in its current location and resolution will take care of any // mismatch. assert(getNextBlock() == nullptr || !VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn, currentRefPosition->getInterval()->getVarIndex(compiler))); currentRefPosition->referent->recentRefPosition = currentRefPosition; continue; case RefTypeKillGCRefs: // No action to take at resolution time, and no interval to update recentRefPosition for. continue; case RefTypeDummyDef: case RefTypeParamDef: case RefTypeZeroInit: // Should have handled all of these already default: unreached(); break; } updateMaxSpill(currentRefPosition); GenTree* treeNode = currentRefPosition->treeNode; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (currentRefPosition->refType == RefTypeUpperVectorSave) { // The treeNode is a call or something that might become one. noway_assert(treeNode != nullptr); // If the associated interval is an UpperVector, this must be a RefPosition for a LargeVectorType // LocalVar. // Otherwise, this is a non-lclVar interval that has been spilled, and we don't need to do anything. Interval* interval = currentRefPosition->getInterval(); if (interval->isUpperVector) { Interval* localVarInterval = interval->relatedInterval; if ((localVarInterval->physReg != REG_NA) && !localVarInterval->isPartiallySpilled) { // If the localVar is in a register, it must be in a register that is not trashed by // the current node (otherwise it would have already been spilled). assert((genRegMask(localVarInterval->physReg) & getKillSetForNode(treeNode)) == RBM_NONE); // If we have allocated a register to spill it to, we will use that; otherwise, we will spill it // to the stack. We can use as a temp register any non-arg caller-save register. currentRefPosition->referent->recentRefPosition = currentRefPosition; insertUpperVectorSave(treeNode, currentRefPosition, currentRefPosition->getInterval(), block); localVarInterval->isPartiallySpilled = true; } } else { // This is a non-lclVar interval that must have been spilled. 
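                    // (Its def carried spillAfter, so its value already lives on the stack
                    // across this save point; the asserts below check exactly that.)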
                    assert(!currentRefPosition->getInterval()->isLocalVar);
                    assert(currentRefPosition->getInterval()->firstRefPosition->spillAfter);
                }
                continue;
            }
            else if (currentRefPosition->refType == RefTypeUpperVectorRestore)
            {
                // Since we don't do partial restores of tree temp intervals, this must be an upperVector.
                Interval* interval         = currentRefPosition->getInterval();
                Interval* localVarInterval = interval->relatedInterval;
                assert(interval->isUpperVector && (localVarInterval != nullptr));
                if (localVarInterval->physReg != REG_NA)
                {
                    assert(localVarInterval->isPartiallySpilled);
                    assert((localVarInterval->assignedReg != nullptr) &&
                           (localVarInterval->assignedReg->regNum == localVarInterval->physReg) &&
                           (localVarInterval->assignedReg->assignedInterval == localVarInterval));
                    insertUpperVectorRestore(treeNode, currentRefPosition, interval, block);
                }
                localVarInterval->isPartiallySpilled = false;
            }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

            // Most uses won't actually need to be recorded (they're on the def).
            // In those cases, treeNode will be nullptr.
            if (treeNode == nullptr)
            {
                // This is either a use, a dead def, or a field of a struct
                Interval* interval = currentRefPosition->getInterval();
                assert(currentRefPosition->refType == RefTypeUse ||
                       currentRefPosition->registerAssignment == RBM_NONE || interval->isStructField ||
                       interval->IsUpperVector());

                // TODO-Review: Need to handle the case where any of the struct fields
                // are reloaded/spilled at this use
                assert(!interval->isStructField ||
                       (currentRefPosition->reload == false && currentRefPosition->spillAfter == false));

                if (interval->isLocalVar && !interval->isStructField)
                {
                    LclVarDsc* varDsc = interval->getLocalVar(compiler);

                    // This must be a dead definition. We need to mark the lclVar
                    // so that it's not considered a candidate for lvRegister, as
                    // this dead def will have to go to the stack.
                    assert(currentRefPosition->refType == RefTypeDef);
                    varDsc->SetRegNum(REG_STK);
                }
                continue;
            }

            assert(currentRefPosition->isIntervalRef());
            if (currentRefPosition->getInterval()->isInternal)
            {
                treeNode->gtRsvdRegs |= currentRefPosition->registerAssignment;
            }
            else
            {
                writeRegisters(currentRefPosition, treeNode);

                if (treeNode->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR) && currentRefPosition->getInterval()->isLocalVar)
                {
                    resolveLocalRef(block, treeNode->AsLclVar(), currentRefPosition);
                }

                // Mark spill locations on temps
                // (local vars are handled in resolveLocalRef, above)
                // Note that the tree node will be changed from GTF_SPILL to GTF_SPILLED
                // in codegen, taking care of the "reload" case for temps
                else if (currentRefPosition->spillAfter ||
                         (currentRefPosition->nextRefPosition != nullptr &&
                          currentRefPosition->nextRefPosition->moveReg))
                {
                    if (treeNode != nullptr)
                    {
                        if (currentRefPosition->spillAfter)
                        {
                            treeNode->gtFlags |= GTF_SPILL;

                            // If this is a constant interval that is reusing a pre-existing value, we actually need
                            // to generate the value at this point in order to spill it.
                            if (treeNode->IsReuseRegVal())
                            {
                                treeNode->ResetReuseRegVal();
                            }

                            // In case of multi-reg node, also set spill flag on the
                            // register specified by multi-reg index of current RefPosition.
                            // Note that the spill flag on treeNode indicates that one or
                            // more of its allocated registers are in that state.
if (treeNode->IsMultiRegCall()) { GenTreeCall* call = treeNode->AsCall(); call->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #if FEATURE_ARG_SPLIT else if (treeNode->OperIsPutArgSplit()) { GenTreePutArgSplit* splitArg = treeNode->AsPutArgSplit(); splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #ifdef TARGET_ARM else if (compFeatureArgSplit() && treeNode->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp(); multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #endif // TARGET_ARM #endif // FEATURE_ARG_SPLIT } // If the value is reloaded or moved to a different register, we need to insert // a node to hold the register to which it should be reloaded RefPosition* nextRefPosition = currentRefPosition->nextRefPosition; noway_assert(nextRefPosition != nullptr); if (INDEBUG(alwaysInsertReload() ||) nextRefPosition->assignedReg() != currentRefPosition->assignedReg()) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Note that we asserted above that this is an Interval RefPosition. Interval* currentInterval = currentRefPosition->getInterval(); if (!currentInterval->isUpperVector && nextRefPosition->refType == RefTypeUpperVectorSave) { // The currentRefPosition is a spill of a tree temp. // These have no associated Restore, as we always spill if the vector is // in a register when this is encountered. // The nextRefPosition we're interested in (where we may need to insert a // reload or flag as GTF_NOREG_AT_USE) is the subsequent RefPosition. assert(!currentInterval->isLocalVar); nextRefPosition = nextRefPosition->nextRefPosition; assert(nextRefPosition->refType != RefTypeUpperVectorSave); } // UpperVector intervals may have unique assignments at each reference. if (!currentInterval->isUpperVector) #endif { if (nextRefPosition->assignedReg() != REG_NA) { insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), nextRefPosition); } else { assert(nextRefPosition->RegOptional()); // In case of tree temps, if def is spilled and use didn't // get a register, set a flag on tree node to be treated as // contained at the point of its use. if (currentRefPosition->spillAfter && currentRefPosition->refType == RefTypeDef && nextRefPosition->refType == RefTypeUse) { assert(nextRefPosition->treeNode == nullptr); treeNode->gtFlags |= GTF_NOREG_AT_USE; } } } } } // We should never have to "spill after" a temp use, since // they're single use else { unreached(); } } } } if (enregisterLocalVars) { processBlockEndLocations(block); } } if (enregisterLocalVars) { #ifdef DEBUG if (VERBOSE) { printf("-----------------------\n"); printf("RESOLVING BB BOUNDARIES\n"); printf("-----------------------\n"); printf("Resolution Candidates: "); dumpConvertedVarSet(compiler, resolutionCandidateVars); printf("\n"); printf("Has %sCritical Edges\n\n", hasCriticalEdges ? 
"" : "No "); printf("Prior to Resolution\n"); for (BasicBlock* const block : compiler->Blocks()) { printf("\n" FMT_BB, block->bbNum); if (block->hasEHBoundaryIn()) { JITDUMP(" EH flow in"); } if (block->hasEHBoundaryOut()) { JITDUMP(" EH flow out"); } printf("\nuse def in out\n"); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n"); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n"); dumpInVarToRegMap(block); dumpOutVarToRegMap(block); } printf("\n\n"); } #endif // DEBUG resolveEdges(); // Verify register assignments on variables unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++) { if (!isCandidateVar(varDsc)) { varDsc->SetRegNum(REG_STK); } else { Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); // Determine initial position for parameters if (varDsc->lvIsParam) { regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment; regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter) ? REG_STK : genRegNumFromMask(initialRegMask); #ifdef TARGET_ARM if (varTypeIsMultiReg(varDsc)) { // TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and GetOtherReg() (these should NYI // before this) assert(!"Multi-reg types not yet supported"); } else #endif // TARGET_ARM { varDsc->SetArgInitReg(initialReg); JITDUMP(" Set V%02u argument initial register to %s\n", lclNum, getRegName(initialReg)); } // Stack args that are part of dependently-promoted structs should never be register candidates (see // LinearScan::isRegCandidate). assert(varDsc->lvIsRegArg || !compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc)); } // If lvRegNum is REG_STK, that means that either no register // was assigned, or (more likely) that the same register was not // used for all references. In that case, codegen gets the register // from the tree node. if (varDsc->GetRegNum() == REG_STK || interval->isSpilled || interval->isSplit) { // For codegen purposes, we'll set lvRegNum to whatever register // it's currently in as we go. // However, we never mark an interval as lvRegister if it has either been spilled // or split. varDsc->lvRegister = false; // Skip any dead defs or exposed uses // (first use exposed will only occur when there is no explicit initialization) RefPosition* firstRefPosition = interval->firstRefPosition; while ((firstRefPosition != nullptr) && (firstRefPosition->refType == RefTypeExpUse)) { firstRefPosition = firstRefPosition->nextRefPosition; } if (firstRefPosition == nullptr) { // Dead interval varDsc->lvLRACandidate = false; if (varDsc->lvRefCnt() == 0) { varDsc->lvOnFrame = false; } else { // We may encounter cases where a lclVar actually has no references, but // a non-zero refCnt. For safety (in case this is some "hidden" lclVar that we're // not correctly recognizing), we'll mark those as needing a stack location. // TODO-Cleanup: Make this an assert if/when we correct the refCnt // updating. varDsc->lvOnFrame = true; } } else { // If the interval was not spilled, it doesn't need a stack location. 
if (!interval->isSpilled) { varDsc->lvOnFrame = false; } if (firstRefPosition->registerAssignment == RBM_NONE || firstRefPosition->spillAfter) { // Either this RefPosition is spilled, or regOptional or it is not a "real" def or use assert( firstRefPosition->spillAfter || firstRefPosition->RegOptional() || (firstRefPosition->refType != RefTypeDef && firstRefPosition->refType != RefTypeUse)); varDsc->SetRegNum(REG_STK); } else { varDsc->SetRegNum(firstRefPosition->assignedReg()); } } } else { { varDsc->lvRegister = true; varDsc->lvOnFrame = false; } #ifdef DEBUG regMaskTP registerAssignment = genRegMask(varDsc->GetRegNum()); assert(!interval->isSpilled && !interval->isSplit); RefPosition* refPosition = interval->firstRefPosition; assert(refPosition != nullptr); while (refPosition != nullptr) { // All RefPositions must match, except for dead definitions, // copyReg/moveReg and RefTypeExpUse positions if (refPosition->registerAssignment != RBM_NONE && !refPosition->copyReg && !refPosition->moveReg && refPosition->refType != RefTypeExpUse) { assert(refPosition->registerAssignment == registerAssignment); } refPosition = refPosition->nextRefPosition; } #endif // DEBUG } } } } #ifdef DEBUG if (VERBOSE) { printf("Trees after linear scan register allocator (LSRA)\n"); compiler->fgDispBasicBlocks(true); } verifyFinalAllocation(); #endif // DEBUG compiler->raMarkStkVars(); recordMaxSpill(); // TODO-CQ: Review this comment and address as needed. // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT) // so that the gc tracking logic and lvMustInit logic will ignore them. // Extract the code that does this from raAssignVars, and call it here. // PRECONDITIONS: Ensure that lvPromoted is set on promoted structs, if and // only if it is promoted on all paths. // Call might be something like: // compiler->BashUnusedStructLocals(); } // //------------------------------------------------------------------------ // insertMove: Insert a move of a lclVar with the given lclNum into the given block. // // Arguments: // block - the BasicBlock into which the move will be inserted. // insertionPoint - the instruction before which to insert the move // lclNum - the lclNum of the var to be moved // fromReg - the register from which the var is moving // toReg - the register to which the var is moving // // Return Value: // None. // // Notes: // If insertionPoint is non-NULL, insert before that instruction; // otherwise, insert "near" the end (prior to the branch, if any). // If fromReg or toReg is REG_STK, then move from/to memory, respectively. void LinearScan::insertMove( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); // the lclVar must be a register candidate assert(isRegCandidate(varDsc)); // One or both MUST be a register assert(fromReg != REG_STK || toReg != REG_STK); // They must not be the same register. assert(fromReg != toReg); // This var can't be marked lvRegister now varDsc->SetRegNum(REG_STK); GenTree* src = compiler->gtNewLclvNode(lclNum, varDsc->TypeGet()); SetLsraAdded(src); // There are three cases we need to handle: // - We are loading a lclVar from the stack. // - We are storing a lclVar to the stack. // - We are copying a lclVar between registers. // // In the first and second cases, the lclVar node will be marked with GTF_SPILLED and GTF_SPILL, respectively. 
// It is up to the code generator to ensure that any necessary normalization is done when loading or storing the // lclVar's value. // // In the third case, we generate GT_COPY(GT_LCL_VAR) and type each node with the normalized type of the lclVar. // This is safe because a lclVar is always normalized once it is in a register. GenTree* dst = src; if (fromReg == REG_STK) { src->gtFlags |= GTF_SPILLED; src->SetRegNum(toReg); } else if (toReg == REG_STK) { src->gtFlags |= GTF_SPILL; src->SetRegNum(fromReg); } else { var_types movType = varDsc->GetRegisterType(); src->gtType = movType; dst = new (compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, movType, src); // This is the new home of the lclVar - indicate that by clearing the GTF_VAR_DEATH flag. // Note that if src is itself a lastUse, this will have no effect. dst->gtFlags &= ~(GTF_VAR_DEATH); src->SetRegNum(fromReg); dst->SetRegNum(toReg); SetLsraAdded(dst); } dst->SetUnusedValue(); LIR::Range treeRange = LIR::SeqTree(compiler, dst); LIR::Range& blockRange = LIR::AsRange(block); if (insertionPoint != nullptr) { blockRange.InsertBefore(insertionPoint, std::move(treeRange)); } else { // Put the copy at the bottom GenTree* lastNode = blockRange.LastNode(); if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = lastNode; assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, std::move(treeRange)); } else { // These block kinds don't have a branch at the end. assert((lastNode == nullptr) || (!lastNode->OperIsConditionalJump() && !lastNode->OperIs(GT_SWITCH_TABLE, GT_SWITCH, GT_RETURN, GT_RETFILT))); blockRange.InsertAtEnd(std::move(treeRange)); } } } void LinearScan::insertSwap( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2) { #ifdef DEBUG if (VERBOSE) { const char* insertionPointString = "top"; if (insertionPoint == nullptr) { insertionPointString = "bottom"; } printf(" " FMT_BB " %s: swap V%02u in %s with V%02u in %s\n", block->bbNum, insertionPointString, lclNum1, getRegName(reg1), lclNum2, getRegName(reg2)); } #endif // DEBUG LclVarDsc* varDsc1 = compiler->lvaGetDesc(lclNum1); LclVarDsc* varDsc2 = compiler->lvaGetDesc(lclNum2); assert(reg1 != REG_STK && reg1 != REG_NA && reg2 != REG_STK && reg2 != REG_NA); GenTree* lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet()); lcl1->SetRegNum(reg1); SetLsraAdded(lcl1); GenTree* lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet()); lcl2->SetRegNum(reg2); SetLsraAdded(lcl2); GenTree* swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2); swap->SetRegNum(REG_NA); SetLsraAdded(swap); lcl1->gtNext = lcl2; lcl2->gtPrev = lcl1; lcl2->gtNext = swap; swap->gtPrev = lcl2; LIR::Range swapRange = LIR::SeqTree(compiler, swap); LIR::Range& blockRange = LIR::AsRange(block); if (insertionPoint != nullptr) { blockRange.InsertBefore(insertionPoint, std::move(swapRange)); } else { // Put the copy at the bottom // If there's a branch, make an embedded statement that executes just prior to the branch if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = blockRange.LastNode(); assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, std::move(swapRange)); } else { assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); blockRange.InsertAtEnd(std::move(swapRange)); } } } 
//------------------------------------------------------------------------
// getTempRegForResolution: Get a free register to use for resolution code.
//
// Arguments:
//    fromBlock - The "from" block on the edge being resolved.
//    toBlock   - The "to" block on the edge
//    type      - the type of register required
//
// Return Value:
//    Returns a register that is free on the given edge, or REG_NA if none is available.
//
// Notes:
//    It is up to the caller to check the return value, and to determine whether a register is
//    available, and to handle that case appropriately.
//    It is also up to the caller to cache the return value, as this is not cheap to compute.

regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type)
{
    // TODO-Throughput: This would be much more efficient if we add RegToVarMaps instead of VarToRegMaps
    // and they would be more space-efficient as well.
    VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
    VarToRegMap toVarToRegMap   = getInVarToRegMap(toBlock->bbNum);

#ifdef TARGET_ARM
    regMaskTP freeRegs;
    if (type == TYP_DOUBLE)
    {
        // We have to consider all float registers for TYP_DOUBLE
        freeRegs = allRegs(TYP_FLOAT);
    }
    else
    {
        freeRegs = allRegs(type);
    }
#else  // !TARGET_ARM
    regMaskTP freeRegs = allRegs(type);
#endif // !TARGET_ARM

#ifdef DEBUG
    if (getStressLimitRegs() == LSRA_LIMIT_SMALL_SET)
    {
        return REG_NA;
    }
#endif // DEBUG
    INDEBUG(freeRegs = stressLimitRegs(nullptr, freeRegs));

    // We are only interested in the variables that are live-in to the "to" block.
    VarSetOps::Iter iter(compiler, toBlock->bbLiveIn);
    unsigned        varIndex = 0;
    while (iter.NextElem(&varIndex) && freeRegs != RBM_NONE)
    {
        regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
        regNumber toReg   = getVarReg(toVarToRegMap, varIndex);
        assert(fromReg != REG_NA && toReg != REG_NA);
        if (fromReg != REG_STK)
        {
            freeRegs &= ~genRegMask(fromReg, getIntervalForLocalVar(varIndex)->registerType);
        }
        if (toReg != REG_STK)
        {
            freeRegs &= ~genRegMask(toReg, getIntervalForLocalVar(varIndex)->registerType);
        }
    }

#ifdef TARGET_ARM
    if (type == TYP_DOUBLE)
    {
        // Exclude any doubles for which the odd half isn't in freeRegs.
        freeRegs = freeRegs & ((freeRegs << 1) & RBM_ALLDOUBLE);
    }
#endif

    if (freeRegs == RBM_NONE)
    {
        return REG_NA;
    }
    else
    {
        regNumber tempReg = genRegNumFromMask(genFindLowestBit(freeRegs));
        return tempReg;
    }
}

#ifdef TARGET_ARM
//------------------------------------------------------------------------
// addResolutionForDouble: Add resolution move(s) for TYP_DOUBLE interval
//                         and update location.
//
// Arguments:
//    block           - the BasicBlock into which the move will be inserted.
//    insertionPoint  - the instruction before which to insert the move
//    sourceIntervals - maintains sourceIntervals[reg], the interval that each 'reg' is associated with
//    location        - maintains location[reg] which is the location of the var that was originally in 'reg'.
//    toReg           - the register to which the var is moving
//    fromReg         - the register from which the var is moving
//    resolveType     - the type of resolution to be performed
//
// Return Value:
//    None.
//
// Notes:
//    It inserts at least one move and updates incoming parameter 'location'.
// void LinearScan::addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, regNumberSmall* location, regNumber toReg, regNumber fromReg, ResolveType resolveType) { regNumber secondHalfTargetReg = REG_NEXT(fromReg); Interval* intervalToBeMoved1 = sourceIntervals[fromReg]; Interval* intervalToBeMoved2 = sourceIntervals[secondHalfTargetReg]; assert(!(intervalToBeMoved1 == nullptr && intervalToBeMoved2 == nullptr)); if (intervalToBeMoved1 != nullptr) { if (intervalToBeMoved1->registerType == TYP_DOUBLE) { // TYP_DOUBLE interval occupies a double register, i.e. two float registers. assert(intervalToBeMoved2 == nullptr); assert(genIsValidDoubleReg(toReg)); } else { // TYP_FLOAT interval occupies 1st half of double register, i.e. 1st float register assert(genIsValidFloatReg(toReg)); } addResolution(block, insertionPoint, intervalToBeMoved1, toReg, fromReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[fromReg] = (regNumberSmall)toReg; } if (intervalToBeMoved2 != nullptr) { // TYP_FLOAT interval occupies 2nd half of double register. assert(intervalToBeMoved2->registerType == TYP_FLOAT); regNumber secondHalfTempReg = REG_NEXT(toReg); addResolution(block, insertionPoint, intervalToBeMoved2, secondHalfTempReg, secondHalfTargetReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[secondHalfTargetReg] = (regNumberSmall)secondHalfTempReg; } return; } #endif // TARGET_ARM //------------------------------------------------------------------------ // addResolution: Add a resolution move of the given interval // // Arguments: // block - the BasicBlock into which the move will be inserted. // insertionPoint - the instruction before which to insert the move // interval - the interval of the var to be moved // toReg - the register to which the var is moving // fromReg - the register from which the var is moving // // Return Value: // None. // // Notes: // For joins, we insert at the bottom (indicated by an insertionPoint // of nullptr), while for splits we insert at the top. // This is because for joins 'block' is a pred of the join, while for splits it is a succ. // For critical edges, this function may be called twice - once to move from // the source (fromReg), if any, to the stack, in which case toReg will be // REG_STK, and we insert at the bottom (leave insertionPoint as nullptr). // The next time, we want to move from the stack to the destination (toReg), // in which case fromReg will be REG_STK, and we insert at the top. void LinearScan::addResolution( BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber toReg, regNumber fromReg) { #ifdef DEBUG const char* insertionPointString; if (insertionPoint == nullptr) { // We can't add resolution to a register at the bottom of a block that has an EHBoundaryOut, // except in the case of the "EH Dummy" resolution from the stack. assert((block->bbNum > bbNumMaxBeforeResolution) || (fromReg == REG_STK) || !blockInfo[block->bbNum].hasEHBoundaryOut); insertionPointString = "bottom"; } else { // We can't add resolution at the top of a block that has an EHBoundaryIn, // except in the case of the "EH Dummy" resolution to the stack. assert((block->bbNum > bbNumMaxBeforeResolution) || (toReg == REG_STK) || !blockInfo[block->bbNum].hasEHBoundaryIn); insertionPointString = "top"; } // We should never add resolution move inside BBCallAlwaysPairTail. 
    noway_assert(!block->isBBCallAlwaysPairTail());
#endif // DEBUG

    JITDUMP(" " FMT_BB " %s: move V%02u from ", block->bbNum, insertionPointString, interval->varNum);
    JITDUMP("%s to %s", getRegName(fromReg), getRegName(toReg));

    insertMove(block, insertionPoint, interval->varNum, fromReg, toReg);
    if (fromReg == REG_STK || toReg == REG_STK)
    {
        assert(interval->isSpilled);
    }
    else
    {
        // We should have already marked this as spilled or split.
        assert((interval->isSpilled) || (interval->isSplit));
    }

    INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}

//------------------------------------------------------------------------
// handleOutgoingCriticalEdges: Performs the necessary resolution on all critical edges that feed out of 'block'
//
// Arguments:
//    block     - the block with outgoing critical edges.
//
// Return Value:
//    None.
//
// Notes:
//    For all outgoing critical edges (i.e. any successor of this block which is
//    a join edge), if there are any conflicts, split the edge by adding a new block,
//    and generate the resolution code into that block.

void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
{
    VARSET_TP outResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveOut, resolutionCandidateVars));
    if (VarSetOps::IsEmpty(compiler, outResolutionSet))
    {
        return;
    }
    VARSET_TP sameResolutionSet(VarSetOps::MakeEmpty(compiler));
    VARSET_TP diffResolutionSet(VarSetOps::MakeEmpty(compiler));

    // Get the outVarToRegMap for this block
    VarToRegMap outVarToRegMap = getOutVarToRegMap(block->bbNum);
    unsigned    succCount      = block->NumSucc(compiler);
    assert(succCount > 1);

    // First, determine the live regs at the end of this block so that we know what regs are
    // available to copy into.
    // Note that for this purpose we use the full live-out set, because we must ensure that
    // even the registers that remain the same across the edge are preserved correctly.
    regMaskTP       liveOutRegs = RBM_NONE;
    VarSetOps::Iter liveOutIter(compiler, block->bbLiveOut);
    unsigned        liveOutVarIndex = 0;
    while (liveOutIter.NextElem(&liveOutVarIndex))
    {
        regNumber fromReg = getVarReg(outVarToRegMap, liveOutVarIndex);
        if (fromReg != REG_STK)
        {
            regMaskTP fromRegMask = genRegMask(fromReg, getIntervalForLocalVar(liveOutVarIndex)->registerType);
            liveOutRegs |= fromRegMask;
        }
    }

    // Next, if this block ends with a switch table, or for Arm64, ends with a JCMP instruction,
    // make sure to not copy into the registers that are consumed at the end of this block.
    //
    // Note: Only switches and JCMP (for Arm64) have input regs (and so can be fed by copies), so those
    // are the only block-ending branches that need special handling.
    regMaskTP consumedRegs = RBM_NONE;
    if (block->bbJumpKind == BBJ_SWITCH)
    {
        // At this point, Lowering has transformed any non-switch-table blocks into
        // cascading ifs.
        GenTree* switchTable = LIR::AsRange(block).LastNode();
        assert(switchTable != nullptr && switchTable->OperGet() == GT_SWITCH_TABLE);

        consumedRegs = switchTable->gtRsvdRegs;
        GenTree* op1 = switchTable->gtGetOp1();
        GenTree* op2 = switchTable->gtGetOp2();
        noway_assert(op1 != nullptr && op2 != nullptr);
        assert(op1->GetRegNum() != REG_NA && op2->GetRegNum() != REG_NA);
        // No floating point values, so no need to worry about the register type
        // (i.e. for ARM32, where we used the genRegMask overload with a type).
        assert(varTypeIsIntegralOrI(op1) && varTypeIsIntegralOrI(op2));
        consumedRegs |= genRegMask(op1->GetRegNum());
        consumedRegs |= genRegMask(op2->GetRegNum());

        // Special handling for GT_COPY to not resolve into the source
        // of switch's operand.
        if (op1->OperIs(GT_COPY))
        {
            GenTree* srcOp1 = op1->gtGetOp1();
            consumedRegs |= genRegMask(srcOp1->GetRegNum());
        }
    }

#ifdef TARGET_ARM64
    // Next, if this block ends with a JCMP, we have to make sure:
    // 1. Not to copy into the register that JCMP uses
    //    e.g. JCMP w21, BRANCH
    // 2. Not to copy into the source of JCMP's operand before it is consumed
    //    e.g. Should not use w0 since it will contain wrong value after resolution
    //         call METHOD
    //         ; mov w0, w19  <-- should not resolve in w0 here.
    //         mov w21, w0
    //         JCMP w21, BRANCH
    // 3. Not to modify the local variable it must consume
    // Note: GT_COPY has special handling in codegen and its generation is merged with the
    // node that consumes its result. So both the input and output regs of GT_COPY must be
    // excluded from the set available for resolution.
    LclVarDsc* jcmpLocalVarDsc = nullptr;
    if (block->bbJumpKind == BBJ_COND)
    {
        GenTree* lastNode = LIR::AsRange(block).LastNode();

        if (lastNode->OperIs(GT_JCMP))
        {
            GenTree* op1 = lastNode->gtGetOp1();
            consumedRegs |= genRegMask(op1->GetRegNum());

            if (op1->OperIs(GT_COPY))
            {
                GenTree* srcOp1 = op1->gtGetOp1();
                consumedRegs |= genRegMask(srcOp1->GetRegNum());
            }

            if (op1->IsLocal())
            {
                GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
                jcmpLocalVarDsc          = &compiler->lvaTable[lcl->GetLclNum()];
            }
        }
    }
#endif

    VarToRegMap sameVarToRegMap = sharedCriticalVarToRegMap;
    regMaskTP   sameWriteRegs   = RBM_NONE;
    regMaskTP   diffReadRegs    = RBM_NONE;

    // For each var that may require resolution, classify it as:
    // - in the same register at the end of this block and at each target (no resolution needed)
    // - in different registers at different targets (resolve separately):
    //       diffResolutionSet
    // - in the same register at each target at which it's live, but different from the end of
    //   this block.  We may be able to resolve these as if it is "join", but only if they do not
    //   write to any registers that are read by those in the diffResolutionSet:
    //       sameResolutionSet
    VarSetOps::Iter outResolutionSetIter(compiler, outResolutionSet);
    unsigned        outResolutionSetVarIndex = 0;
    while (outResolutionSetIter.NextElem(&outResolutionSetVarIndex))
    {
        regNumber fromReg             = getVarReg(outVarToRegMap, outResolutionSetVarIndex);
        bool      maybeSameLivePaths  = false;
        bool      liveOnlyAtSplitEdge = true;
        regNumber sameToReg           = REG_NA;
        for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
        {
            BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
            if (!VarSetOps::IsMember(compiler, succBlock->bbLiveIn, outResolutionSetVarIndex))
            {
                maybeSameLivePaths = true;
                continue;
            }
            else if (liveOnlyAtSplitEdge)
            {
                // Is the var live only at those target blocks which are connected by a split edge to this block
                liveOnlyAtSplitEdge =
                    ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB));
            }

            regNumber toReg = getVarReg(getInVarToRegMap(succBlock->bbNum), outResolutionSetVarIndex);
            if (sameToReg == REG_NA)
            {
                sameToReg = toReg;
                continue;
            }
            if (toReg == sameToReg)
            {
                continue;
            }
            sameToReg = REG_NA;
            break;
        }

        // Check for the cases where we can't write to a register.
        // We only need to check for these cases if sameToReg is an actual register (not REG_STK).
        if (sameToReg != REG_NA && sameToReg != REG_STK)
        {
            // If there's a path on which this var isn't live, it may use the original value in sameToReg.
            // In this case, sameToReg will be in the liveOutRegs of this block.
            // Similarly, if sameToReg is in sameWriteRegs, it has already been used (i.e.
for a lclVar that's // live only at another target), and we can't copy another lclVar into that reg in this block. regMaskTP sameToRegMask = genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); if (maybeSameLivePaths && (((sameToRegMask & liveOutRegs) != RBM_NONE) || ((sameToRegMask & sameWriteRegs) != RBM_NONE))) { sameToReg = REG_NA; } // If this register is busy because it is used by a switch table at the end of the block // (or for Arm64, it is consumed by JCMP), we can't do the copy in this block since we can't // insert it after the switch (or for Arm64, can't insert and overwrite the operand/source // of operand of JCMP). if ((sameToRegMask & consumedRegs) != RBM_NONE) { sameToReg = REG_NA; } #ifdef TARGET_ARM64 if (jcmpLocalVarDsc && (jcmpLocalVarDsc->lvVarIndex == outResolutionSetVarIndex)) { sameToReg = REG_NA; } #endif // If the var is live only at those blocks connected by a split edge and not live-in at some of the // target blocks, we will resolve it the same way as if it were in diffResolutionSet and resolution // will be deferred to the handling of split edges, which means copy will only be at those target(s). // // Another way to achieve similar resolution for vars live only at split edges is by removing them // from consideration up-front but it requires that we traverse those edges anyway to account for // the registers that must not be overwritten. if (liveOnlyAtSplitEdge && maybeSameLivePaths) { sameToReg = REG_NA; } } if (sameToReg == REG_NA) { VarSetOps::AddElemD(compiler, diffResolutionSet, outResolutionSetVarIndex); if (fromReg != REG_STK) { diffReadRegs |= genRegMask(fromReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); } } else if (sameToReg != fromReg) { VarSetOps::AddElemD(compiler, sameResolutionSet, outResolutionSetVarIndex); setVarReg(sameVarToRegMap, outResolutionSetVarIndex, sameToReg); if (sameToReg != REG_STK) { sameWriteRegs |= genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); } } } if (!VarSetOps::IsEmpty(compiler, sameResolutionSet)) { if ((sameWriteRegs & diffReadRegs) != RBM_NONE) { // We cannot split the "same" and "diff" regs if the "same" set writes registers // that must be read by the "diff" set. (Note that when these are done as a "batch" // we carefully order them to ensure all the input regs are read before they are // overwritten.) VarSetOps::UnionD(compiler, diffResolutionSet, sameResolutionSet); VarSetOps::ClearD(compiler, sameResolutionSet); } else { // For any vars in the sameResolutionSet, we can simply add the move at the end of "block". resolveEdge(block, nullptr, ResolveSharedCritical, sameResolutionSet); } } if (!VarSetOps::IsEmpty(compiler, diffResolutionSet)) { for (unsigned succIndex = 0; succIndex < succCount; succIndex++) { BasicBlock* succBlock = block->GetSucc(succIndex, compiler); // Any "diffResolutionSet" resolution for a block with no other predecessors will be handled later // as split resolution. if ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB)) { continue; } // Now collect the resolution set for just this edge, if any. // Check only the vars in diffResolutionSet that are live-in to this successor. 
            VarToRegMap succInVarToRegMap = getInVarToRegMap(succBlock->bbNum);
            VARSET_TP edgeResolutionSet(VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
            VarSetOps::Iter iter(compiler, edgeResolutionSet);
            unsigned        varIndex = 0;
            while (iter.NextElem(&varIndex))
            {
                regNumber fromReg = getVarReg(outVarToRegMap, varIndex);
                regNumber toReg   = getVarReg(succInVarToRegMap, varIndex);

                if (fromReg == toReg)
                {
                    VarSetOps::RemoveElemD(compiler, edgeResolutionSet, varIndex);
                }
            }
            if (!VarSetOps::IsEmpty(compiler, edgeResolutionSet))
            {
                // For EH vars, we can always safely load them from the stack into the target for this block,
                // so if we have only EH vars, we'll do that instead of splitting the edge.
                if ((compiler->compHndBBtabCount > 0) && VarSetOps::IsSubset(compiler, edgeResolutionSet, exceptVars))
                {
                    GenTree*        insertionPoint = LIR::AsRange(succBlock).FirstNode();
                    VarSetOps::Iter edgeSetIter(compiler, edgeResolutionSet);
                    unsigned        edgeVarIndex = 0;
                    while (edgeSetIter.NextElem(&edgeVarIndex))
                    {
                        regNumber toReg = getVarReg(succInVarToRegMap, edgeVarIndex);
                        setVarReg(succInVarToRegMap, edgeVarIndex, REG_STK);
                        if (toReg != REG_STK)
                        {
                            Interval* interval = getIntervalForLocalVar(edgeVarIndex);
                            assert(interval->isWriteThru);
                            addResolution(succBlock, insertionPoint, interval, toReg, REG_STK);
                            JITDUMP(" (EHvar)\n");
                        }
                    }
                }
                else
                {
                    resolveEdge(block, succBlock, ResolveCritical, edgeResolutionSet);
                }
            }
        }
    }
}

//------------------------------------------------------------------------
// resolveEdges: Perform resolution across basic block edges
//
// Arguments:
//    None.
//
// Return Value:
//    None.
//
// Notes:
//    Traverse the basic blocks.
//    - If this block has a single predecessor that is not the immediately
//      preceding block, perform any needed 'split' resolution at the beginning of this block
//    - Otherwise if this block has critical incoming edges, handle them.
//    - If this block has a single successor that has multiple predecessors, perform any needed
//      'join' resolution at the end of this block.
//    Note that a block may have both 'split' or 'critical' incoming edge(s) and 'join' outgoing
//    edges.

void LinearScan::resolveEdges()
{
    JITDUMP("RESOLVING EDGES\n");

    // The resolutionCandidateVars set was initialized with all the lclVars that are live-in to
    // any block. We now intersect that set with any lclVars that ever spilled or split.
    // If there are no candidates for resolution, simply return.
    VarSetOps::IntersectionD(compiler, resolutionCandidateVars, splitOrSpilledVars);
    if (VarSetOps::IsEmpty(compiler, resolutionCandidateVars))
    {
        return;
    }

    // Handle all the critical edges first.
    // We will try to avoid resolution across critical edges in cases where all the critical-edge
    // targets of a block have the same home.  We will then split the edges only for the
    // remaining mismatches.  We visit the out-edges, as that allows us to share the moves that are
    // common among all the targets.
    if (hasCriticalEdges)
    {
        for (BasicBlock* const block : compiler->Blocks())
        {
            if (block->bbNum > bbNumMaxBeforeResolution)
            {
                // This is a new block added during resolution - we don't need to visit these now.
                continue;
            }
            if (blockInfo[block->bbNum].hasCriticalOutEdge)
            {
                handleOutgoingCriticalEdges(block);
            }
        }
    }

    for (BasicBlock* const block : compiler->Blocks())
    {
        if (block->bbNum > bbNumMaxBeforeResolution)
        {
            // This is a new block added during resolution - we don't need to visit these now.
            continue;
        }

        unsigned    succCount       = block->NumSucc(compiler);
        BasicBlock* uniquePredBlock = block->GetUniquePred(compiler);

        // First, if this block has a single predecessor,
        // we may need resolution at the beginning of this block.
        // This may be true even if it's the block we used for starting locations,
        // if a variable was spilled.
        VARSET_TP inResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveIn, resolutionCandidateVars));
        if (!VarSetOps::IsEmpty(compiler, inResolutionSet))
        {
            if (uniquePredBlock != nullptr)
            {
                // We may have split edges during critical edge resolution, and in the process split
                // a non-critical edge as well.
                // It is unlikely that we would ever have more than one of these in sequence (indeed,
                // I don't think it's possible), but there's no need to assume that it can't.
                while (uniquePredBlock->bbNum > bbNumMaxBeforeResolution)
                {
                    uniquePredBlock = uniquePredBlock->GetUniquePred(compiler);
                    noway_assert(uniquePredBlock != nullptr);
                }
                resolveEdge(uniquePredBlock, block, ResolveSplit, inResolutionSet);
            }
        }

        // Finally, if this block has a single successor:
        //  - and that has at least one other predecessor (otherwise we will do the resolution at the
        //    top of the successor),
        //  - and that is not the target of a critical edge (otherwise we've already handled it)
        // we may need resolution at the end of this block.

        if (succCount == 1)
        {
            BasicBlock* succBlock = block->GetSucc(0, compiler);
            if (succBlock->GetUniquePred(compiler) == nullptr)
            {
                VARSET_TP outResolutionSet(
                    VarSetOps::Intersection(compiler, succBlock->bbLiveIn, resolutionCandidateVars));
                if (!VarSetOps::IsEmpty(compiler, outResolutionSet))
                {
                    resolveEdge(block, succBlock, ResolveJoin, outResolutionSet);
                }
            }
        }
    }

    // Now, fix up the mapping for any blocks that were added for edge splitting.
    // See the comment prior to the call to fgSplitEdge() in resolveEdge().
    // Note that we could fold this loop in with the checking code below, but that
    // would only improve the debug case, and would clutter up the code somewhat.
    if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
    {
        for (BasicBlock* const block : compiler->Blocks())
        {
            if (block->bbNum > bbNumMaxBeforeResolution)
            {
                // There may be multiple blocks inserted when we split.  But we must always have exactly
                // one path (i.e. all blocks must be single-successor and single-predecessor),
                // and only one block along the path may be non-empty.
                // Note that we may have a newly-inserted block that is empty, but which connects
                // two non-resolution blocks. This happens when an edge is split that requires it.

                BasicBlock* succBlock = block;
                do
                {
                    succBlock = succBlock->GetUniqueSucc();
                    noway_assert(succBlock != nullptr);
                } while ((succBlock->bbNum > bbNumMaxBeforeResolution) && succBlock->isEmpty());

                BasicBlock* predBlock = block;
                do
                {
                    predBlock = predBlock->GetUniquePred(compiler);
                    noway_assert(predBlock != nullptr);
                } while ((predBlock->bbNum > bbNumMaxBeforeResolution) && predBlock->isEmpty());

                unsigned succBBNum = succBlock->bbNum;
                unsigned predBBNum = predBlock->bbNum;
                if (block->isEmpty())
                {
                    // For the case of the empty block, find the non-resolution block (succ or pred).
if (predBBNum > bbNumMaxBeforeResolution) { assert(succBBNum <= bbNumMaxBeforeResolution); predBBNum = 0; } else { succBBNum = 0; } } else { assert((succBBNum <= bbNumMaxBeforeResolution) && (predBBNum <= bbNumMaxBeforeResolution)); } SplitEdgeInfo info = {predBBNum, succBBNum}; getSplitBBNumToTargetBBNumMap()->Set(block->bbNum, info); // Set both the live-in and live-out to the live-in of the successor (by construction liveness // doesn't change in a split block). VarSetOps::Assign(compiler, block->bbLiveIn, succBlock->bbLiveIn); VarSetOps::Assign(compiler, block->bbLiveOut, succBlock->bbLiveIn); } } } #ifdef DEBUG // Make sure the varToRegMaps match up on all edges. bool foundMismatch = false; for (BasicBlock* const block : compiler->Blocks()) { if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution) { continue; } VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum); for (BasicBlock* const predBlock : block->PredBlocks()) { VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum); VarSetOps::Iter iter(compiler, block->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { regNumber fromReg = getVarReg(fromVarToRegMap, varIndex); regNumber toReg = getVarReg(toVarToRegMap, varIndex); if (fromReg != toReg) { Interval* interval = getIntervalForLocalVar(varIndex); // The fromReg and toReg may not match for a write-thru interval where the toReg is // REG_STK, since the stack value is always valid for that case (so no move is needed). if (!interval->isWriteThru || (toReg != REG_STK)) { if (!foundMismatch) { foundMismatch = true; printf("Found mismatched var locations after resolution!\n"); } printf(" V%02u: " FMT_BB " to " FMT_BB ": %s to %s\n", interval->varNum, predBlock->bbNum, block->bbNum, getRegName(fromReg), getRegName(toReg)); } } } } } assert(!foundMismatch); #endif JITDUMP("\n"); } //------------------------------------------------------------------------ // resolveEdge: Perform the specified type of resolution between two blocks. // // Arguments: // fromBlock - the block from which the edge originates // toBlock - the block at which the edge terminates // resolveType - the type of resolution to be performed // liveSet - the set of tracked lclVar indices which may require resolution // // Return Value: // None. // // Assumptions: // The caller must have performed the analysis to determine the type of the edge. // // Notes: // This method emits the correctly ordered moves necessary to place variables in the // correct registers across a Split, Join or Critical edge. // In order to avoid overwriting register values before they have been moved to their // new home (register/stack), it first does the register-to-stack moves (to free those // registers), then the register to register moves, ensuring that the target register // is free before the move, and then finally the stack to register moves. 
void LinearScan::resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet) { VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum); VarToRegMap toVarToRegMap; if (resolveType == ResolveSharedCritical) { toVarToRegMap = sharedCriticalVarToRegMap; } else { toVarToRegMap = getInVarToRegMap(toBlock->bbNum); } // The block to which we add the resolution moves depends on the resolveType BasicBlock* block; switch (resolveType) { case ResolveJoin: case ResolveSharedCritical: block = fromBlock; break; case ResolveSplit: block = toBlock; break; case ResolveCritical: // fgSplitEdge may add one or two BasicBlocks. It returns the block that splits // the edge from 'fromBlock' and 'toBlock', but if it inserts that block right after // a block with a fall-through it will have to create another block to handle that edge. // These new blocks can be mapped to existing blocks in order to correctly handle // the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled // in resolveEdges(), after all the edge resolution has been done (by calling this // method for each edge). block = compiler->fgSplitEdge(fromBlock, toBlock); // Split edges are counted against fromBlock. INTRACK_STATS(updateLsraStat(STAT_SPLIT_EDGE, fromBlock->bbNum)); break; default: unreached(); break; } #ifndef TARGET_XARCH // We record tempregs for beginning and end of each block. // For amd64/x86 we only need a tempReg for float - we'll use xchg for int. // TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below // modifies the varToRegMaps so we don't have all the correct registers at the time // we need to get the tempReg. regNumber tempRegInt = (resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT); #endif // !TARGET_XARCH regNumber tempRegFlt = REG_NA; #ifdef TARGET_ARM regNumber tempRegDbl = REG_NA; #endif if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical)) { #ifdef TARGET_ARM // Try to reserve a double register for TYP_DOUBLE and use it for TYP_FLOAT too if available. tempRegDbl = getTempRegForResolution(fromBlock, toBlock, TYP_DOUBLE); if (tempRegDbl != REG_NA) { tempRegFlt = tempRegDbl; } else #endif // TARGET_ARM { tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT); } } regMaskTP targetRegsToDo = RBM_NONE; regMaskTP targetRegsReady = RBM_NONE; regMaskTP targetRegsFromStack = RBM_NONE; // The following arrays capture the location of the registers as they are moved: // - location[reg] gives the current location of the var that was originally in 'reg'. // (Note that a var may be moved more than once.) // - source[reg] gives the original location of the var that needs to be moved to 'reg'. // For example, if a var is in rax and needs to be moved to rsi, then we would start with: // location[rax] == rax // source[rsi] == rax -- this doesn't change // Then, if for some reason we need to move it temporarily to rbx, we would have: // location[rax] == rbx // Once we have completed the move, we will have: // location[rax] == REG_NA // This indicates that the var originally in rax is now in its target register. regNumberSmall location[REG_COUNT]; C_ASSERT(sizeof(char) == sizeof(regNumberSmall)); // for memset to work memset(location, REG_NA, REG_COUNT); regNumberSmall source[REG_COUNT]; memset(source, REG_NA, REG_COUNT); // What interval is this register associated with?
// (associated with incoming reg) Interval* sourceIntervals[REG_COUNT]; memset(&sourceIntervals, 0, sizeof(sourceIntervals)); // Intervals for vars that need to be loaded from the stack Interval* stackToRegIntervals[REG_COUNT]; memset(&stackToRegIntervals, 0, sizeof(stackToRegIntervals)); // Get the starting insertion point for the "to" resolution GenTree* insertionPoint = nullptr; if (resolveType == ResolveSplit || resolveType == ResolveCritical) { insertionPoint = LIR::AsRange(block).FirstNode(); } // If this is an edge between EH regions, we may have "extra" live-out EH vars. // If we are adding resolution at the end of the block, we need to create "virtual" moves // for these so that their registers are freed and can be reused. if ((resolveType == ResolveJoin) && (compiler->compHndBBtabCount > 0)) { VARSET_TP extraLiveSet(VarSetOps::Diff(compiler, block->bbLiveOut, toBlock->bbLiveIn)); VarSetOps::IntersectionD(compiler, extraLiveSet, exceptVars); VarSetOps::Iter iter(compiler, extraLiveSet); unsigned extraVarIndex = 0; while (iter.NextElem(&extraVarIndex)) { Interval* interval = getIntervalForLocalVar(extraVarIndex); assert(interval->isWriteThru); regNumber fromReg = getVarReg(fromVarToRegMap, extraVarIndex); if (fromReg != REG_STK) { addResolution(block, insertionPoint, interval, REG_STK, fromReg); JITDUMP(" (EH DUMMY)\n"); setVarReg(fromVarToRegMap, extraVarIndex, REG_STK); } } } // First: // - Perform all moves from reg to stack (no ordering needed on these) // - For reg to reg moves, record the current location, associating their // source location with the target register they need to go into // - For stack to reg moves (done last, no ordering needed between them) // record the interval associated with the target reg // TODO-Throughput: We should be looping over the liveIn and liveOut registers, since // that will scale better than the live variables VarSetOps::Iter iter(compiler, liveSet); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { Interval* interval = getIntervalForLocalVar(varIndex); regNumber fromReg = getVarReg(fromVarToRegMap, varIndex); regNumber toReg = getVarReg(toVarToRegMap, varIndex); if (fromReg == toReg) { continue; } if (interval->isWriteThru && (toReg == REG_STK)) { // We don't actually move a writeThru var back to the stack, as its stack value is always valid. // However, if this is a Join edge (i.e. the move is happening at the bottom of the block), // and it is a "normal" flow edge, we will go ahead and generate a mov instruction, which will be // a NOP but will cause the variable to be removed from being live in the register. if ((resolveType == ResolveSplit) || block->hasEHBoundaryOut()) { continue; } } // For Critical edges, the location will not change on either side of the edge, // since we'll add a new block to do the move. if (resolveType == ResolveSplit) { setVarReg(toVarToRegMap, varIndex, fromReg); } else if (resolveType == ResolveJoin || resolveType == ResolveSharedCritical) { setVarReg(fromVarToRegMap, varIndex, toReg); } assert(fromReg < UCHAR_MAX && toReg < UCHAR_MAX); if (fromReg == REG_STK) { stackToRegIntervals[toReg] = interval; targetRegsFromStack |= genRegMask(toReg); } else if (toReg == REG_STK) { // Do the reg to stack moves now addResolution(block, insertionPoint, interval, REG_STK, fromReg); JITDUMP(" (%s)\n", (interval->isWriteThru && (toReg == REG_STK)) ? 
"EH DUMMY" : resolveTypeName[resolveType]); } else { location[fromReg] = (regNumberSmall)fromReg; source[toReg] = (regNumberSmall)fromReg; sourceIntervals[fromReg] = interval; targetRegsToDo |= genRegMask(toReg); } } // REGISTER to REGISTER MOVES // First, find all the ones that are ready to move now regMaskTP targetCandidates = targetRegsToDo; while (targetCandidates != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetCandidates); targetCandidates &= ~targetRegMask; regNumber targetReg = genRegNumFromMask(targetRegMask); if (location[targetReg] == REG_NA) { #ifdef TARGET_ARM regNumber sourceReg = (regNumber)source[targetReg]; Interval* interval = sourceIntervals[sourceReg]; if (interval->registerType == TYP_DOUBLE) { // For ARM32, make sure that both of the float halves of the double register are available. assert(genIsValidDoubleReg(targetReg)); regNumber anotherHalfRegNum = REG_NEXT(targetReg); if (location[anotherHalfRegNum] == REG_NA) { targetRegsReady |= targetRegMask; } } else #endif // TARGET_ARM { targetRegsReady |= targetRegMask; } } } // Perform reg to reg moves while (targetRegsToDo != RBM_NONE) { while (targetRegsReady != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetRegsReady); targetRegsToDo &= ~targetRegMask; targetRegsReady &= ~targetRegMask; regNumber targetReg = genRegNumFromMask(targetRegMask); assert(location[targetReg] != targetReg); assert(targetReg < REG_COUNT); regNumber sourceReg = (regNumber)source[targetReg]; assert(sourceReg < REG_COUNT); regNumber fromReg = (regNumber)location[sourceReg]; // stack to reg movs should be done last as part of "targetRegsFromStack" assert(fromReg < REG_STK); Interval* interval = sourceIntervals[sourceReg]; assert(interval != nullptr); addResolution(block, insertionPoint, interval, targetReg, fromReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); sourceIntervals[sourceReg] = nullptr; location[sourceReg] = REG_NA; regMaskTP fromRegMask = genRegMask(fromReg); // Do we have a free targetReg? if (fromReg == sourceReg) { if (source[fromReg] != REG_NA && ((targetRegsFromStack & fromRegMask) != fromRegMask)) { targetRegsReady |= fromRegMask; #ifdef TARGET_ARM if (genIsValidDoubleReg(fromReg)) { // Ensure that either: // - the Interval targeting fromReg is not double, or // - the other half of the double is free. Interval* otherInterval = sourceIntervals[source[fromReg]]; regNumber upperHalfReg = REG_NEXT(fromReg); if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA)) { targetRegsReady &= ~fromRegMask; } } } else if (genIsValidFloatReg(fromReg) && !genIsValidDoubleReg(fromReg)) { // We may have freed up the other half of a double where the lower half // was already free. 
regNumber lowerHalfReg = REG_PREV(fromReg); regNumber lowerHalfSrcReg = (regNumber)source[lowerHalfReg]; regNumber lowerHalfSrcLoc = (regNumber)location[lowerHalfReg]; regMaskTP lowerHalfRegMask = genRegMask(lowerHalfReg); // Necessary conditions: // - There is a source register for this reg (lowerHalfSrcReg != REG_NA) // - It is currently free (lowerHalfSrcLoc == REG_NA) // - The source interval isn't yet completed (sourceIntervals[lowerHalfSrcReg] != nullptr) // - It's not in the ready set ((targetRegsReady & lowerHalfRegMask) == // RBM_NONE) // - It's not resolved from stack ((targetRegsFromStack & lowerHalfRegMask) != // lowerHalfRegMask) if ((lowerHalfSrcReg != REG_NA) && (lowerHalfSrcLoc == REG_NA) && (sourceIntervals[lowerHalfSrcReg] != nullptr) && ((targetRegsReady & lowerHalfRegMask) == RBM_NONE) && ((targetRegsFromStack & lowerHalfRegMask) != lowerHalfRegMask)) { // This must be a double interval, otherwise it would be in targetRegsReady, or already // completed. assert(sourceIntervals[lowerHalfSrcReg]->registerType == TYP_DOUBLE); targetRegsReady |= lowerHalfRegMask; } #endif // TARGET_ARM } } } if (targetRegsToDo != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetRegsToDo); regNumber targetReg = genRegNumFromMask(targetRegMask); // Is it already there due to other moves? // If not, move it to the temp reg, OR swap it with another register regNumber sourceReg = (regNumber)source[targetReg]; regNumber fromReg = (regNumber)location[sourceReg]; if (targetReg == fromReg) { targetRegsToDo &= ~targetRegMask; } else { regNumber tempReg = REG_NA; bool useSwap = false; if (emitter::isFloatReg(targetReg)) { #ifdef TARGET_ARM if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE) { // ARM32 requires a double temp register for TYP_DOUBLE. tempReg = tempRegDbl; } else #endif // TARGET_ARM tempReg = tempRegFlt; } #ifdef TARGET_XARCH else { useSwap = true; } #else // !TARGET_XARCH else { tempReg = tempRegInt; } #endif // !TARGET_XARCH if (useSwap || tempReg == REG_NA) { // First, we have to figure out the destination register for what's currently in fromReg, // so that we can find its sourceInterval. regNumber otherTargetReg = REG_NA; // By chance, is fromReg going where it belongs? if (location[source[fromReg]] == targetReg) { otherTargetReg = fromReg; // If we can swap, we will be done with otherTargetReg as well. // Otherwise, we'll spill it to the stack and reload it later. if (useSwap) { regMaskTP fromRegMask = genRegMask(fromReg); targetRegsToDo &= ~fromRegMask; } } else { // Look at the remaining registers from targetRegsToDo (which we expect to be relatively // small at this point) to find out what's currently in targetReg. regMaskTP mask = targetRegsToDo; while (mask != RBM_NONE && otherTargetReg == REG_NA) { regMaskTP nextRegMask = genFindLowestBit(mask); regNumber nextReg = genRegNumFromMask(nextRegMask); mask &= ~nextRegMask; if (location[source[nextReg]] == targetReg) { otherTargetReg = nextReg; } } } assert(otherTargetReg != REG_NA); if (useSwap) { // Generate a "swap" of fromReg and targetReg insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg, sourceIntervals[sourceReg]->varNum, fromReg); location[sourceReg] = REG_NA; location[source[otherTargetReg]] = (regNumberSmall)fromReg; INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum)); } else { // Spill "targetReg" to the stack and add its eventual target (otherTargetReg) // to "targetRegsFromStack", which will be handled below. // NOTE: This condition is very rare. 
Setting COMPlus_JitStressRegs=0x203 // has been known to trigger it in JIT SH. // First, spill "otherInterval" from targetReg to the stack. Interval* otherInterval = sourceIntervals[source[otherTargetReg]]; setIntervalAsSpilled(otherInterval); addResolution(block, insertionPoint, otherInterval, REG_STK, targetReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[source[otherTargetReg]] = REG_STK; regMaskTP otherTargetRegMask = genRegMask(otherTargetReg); targetRegsFromStack |= otherTargetRegMask; stackToRegIntervals[otherTargetReg] = otherInterval; targetRegsToDo &= ~otherTargetRegMask; // Now, move the interval that is going to targetReg. addResolution(block, insertionPoint, sourceIntervals[sourceReg], targetReg, fromReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[sourceReg] = REG_NA; // Add its "fromReg" to "targetRegsReady", only if: // - It was one of the target registers we originally determined. // - It is not the eventual target (otherTargetReg) because its // value will be retrieved from STK. if (source[fromReg] != REG_NA && fromReg != otherTargetReg) { regMaskTP fromRegMask = genRegMask(fromReg); targetRegsReady |= fromRegMask; #ifdef TARGET_ARM if (genIsValidDoubleReg(fromReg)) { // Ensure that either: // - the Interval targeting fromReg is not double, or // - the other half of the double is free. Interval* otherInterval = sourceIntervals[source[fromReg]]; regNumber upperHalfReg = REG_NEXT(fromReg); if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA)) { targetRegsReady &= ~fromRegMask; } } #endif // TARGET_ARM } } targetRegsToDo &= ~targetRegMask; } else { compiler->codeGen->regSet.rsSetRegsModified(genRegMask(tempReg) DEBUGARG(true)); #ifdef TARGET_ARM if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE) { assert(genIsValidDoubleReg(targetReg)); assert(genIsValidDoubleReg(tempReg)); addResolutionForDouble(block, insertionPoint, sourceIntervals, location, tempReg, targetReg, resolveType); } else #endif // TARGET_ARM { assert(sourceIntervals[targetReg] != nullptr); addResolution(block, insertionPoint, sourceIntervals[targetReg], tempReg, targetReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[targetReg] = (regNumberSmall)tempReg; } targetRegsReady |= targetRegMask; } } } } // Finally, perform stack to reg moves // All the target regs will be empty at this point while (targetRegsFromStack != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetRegsFromStack); targetRegsFromStack &= ~targetRegMask; regNumber targetReg = genRegNumFromMask(targetRegMask); Interval* interval = stackToRegIntervals[targetReg]; assert(interval != nullptr); addResolution(block, insertionPoint, interval, targetReg, REG_STK); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); } } #if TRACK_LSRA_STATS const char* LinearScan::getStatName(unsigned stat) { LsraStat lsraStat = (LsraStat)stat; assert(lsraStat != LsraStat::COUNT); static const char* const lsraStatNames[] = { #define LSRA_STAT_DEF(stat, name) name, #include "lsra_stats.h" #undef LSRA_STAT_DEF #define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat, #include "lsra_score.h" #undef REG_SEL_DEF }; assert(stat < ArrLen(lsraStatNames)); return lsraStatNames[lsraStat]; } LsraStat LinearScan::getLsraStatFromScore(RegisterScore registerScore) { switch (registerScore) { #define REG_SEL_DEF(stat, value, shortname, orderSeqId) \ case RegisterScore::stat: \ return LsraStat::STAT_##stat; #include "lsra_score.h" #undef REG_SEL_DEF default: return LsraStat::STAT_FREE; } }
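// For reference, the REG_SEL_DEF x-macro used above expands each entry of lsra_score.h into
// a case label; e.g. a (hypothetical) entry REG_SEL_DEF(FREE, 0x0001, "FREE", 1) becomes
//   case RegisterScore::FREE: return LsraStat::STAT_FREE;
// which keeps the RegisterScore and LsraStat enumerations in sync without a hand-written table.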
// ---------------------------------------------------------- // updateLsraStat: Increment LSRA stat counter. // // Arguments: // stat - LSRA stat enum // bbNum - Basic block with which LSRA stat needs to be // associated. // void LinearScan::updateLsraStat(LsraStat stat, unsigned bbNum) { if (bbNum > bbNumMaxBeforeResolution) { // This is a newly created basic block as part of resolution. // These blocks contain resolution moves that are already accounted for. return; } ++(blockInfo[bbNum].stats[(unsigned)stat]); } // ----------------------------------------------------------- // dumpLsraStats - dumps Lsra stats to given file. // // Arguments: // file - file to which stats are to be written. // void LinearScan::dumpLsraStats(FILE* file) { unsigned sumStats[LsraStat::COUNT] = {0}; weight_t wtdStats[LsraStat::COUNT] = {0}; fprintf(file, "----------\n"); fprintf(file, "LSRA Stats"); #ifdef DEBUG if (!VERBOSE) { fprintf(file, " : %s\n", compiler->info.compFullName); } else { // In verbose mode there is no need to print the full name // while printing lsra stats. fprintf(file, "\n"); } #else fprintf(file, " : %s\n", compiler->eeGetMethodFullName(compiler->info.compCompHnd)); #endif fprintf(file, "----------\n"); #ifdef DEBUG fprintf(file, "Register selection order: %S\n", JitConfig.JitLsraOrdering() == nullptr ? W("ABCDEFGHIJKLMNOPQ") : JitConfig.JitLsraOrdering()); #endif fprintf(file, "Total Tracked Vars: %d\n", compiler->lvaTrackedCount); fprintf(file, "Total Reg Cand Vars: %d\n", regCandidateVarCount); fprintf(file, "Total number of Intervals: %d\n", static_cast<unsigned>((intervals.size() == 0 ? 0 : (intervals.size() - 1)))); fprintf(file, "Total number of RefPositions: %d\n", static_cast<unsigned>(refPositions.size() - 1)); // compute total number of spill temps created unsigned numSpillTemps = 0; for (int i = 0; i < TYP_COUNT; i++) { numSpillTemps += maxSpill[i]; } fprintf(file, "Total Number of spill temps created: %d\n", numSpillTemps); fprintf(file, "..........\n"); bool addedBlockHeader = false; bool anyNonZeroStat = false; // Iterate for block 0 for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { unsigned lsraStat = blockInfo[0].stats[statIndex]; if (lsraStat != 0) { if (!addedBlockHeader) { addedBlockHeader = true; fprintf(file, FMT_BB " [%8.2f]: ", 0, blockInfo[0].weight); fprintf(file, "%s = %d", getStatName(statIndex), lsraStat); } else { fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat); } sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * blockInfo[0].weight); anyNonZeroStat = true; } } if (anyNonZeroStat) { fprintf(file, "\n"); } // Iterate for remaining blocks for (BasicBlock* const block : compiler->Blocks()) { if (block->bbNum > bbNumMaxBeforeResolution) { continue; } addedBlockHeader = false; anyNonZeroStat = false; for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex]; if (lsraStat != 0) { if (!addedBlockHeader) { addedBlockHeader = true; fprintf(file, FMT_BB " [%8.2f]: ", block->bbNum, block->bbWeight); fprintf(file, "%s = %d", getStatName(statIndex), lsraStat); } else { fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat); } sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * block->bbWeight); anyNonZeroStat = true; } } if (anyNonZeroStat) { fprintf(file, "\n"); } } fprintf(file, "..........\n"); for (int regSelectI = 0; regSelectI < LsraStat::COUNT; regSelectI++) { if (regSelectI == firstRegSelStat) { fprintf(file, "..........\n"); } if
((regSelectI < firstRegSelStat) || (sumStats[regSelectI] != 0)) { // Print register selection stats if (regSelectI >= firstRegSelStat) { fprintf(file, "Total %s [#%2d] : %d Weighted: %f\n", getStatName(regSelectI), (regSelectI - firstRegSelStat + 1), sumStats[regSelectI], wtdStats[regSelectI]); } else { fprintf(file, "Total %s : %d Weighted: %f\n", getStatName(regSelectI), sumStats[regSelectI], wtdStats[regSelectI]); } } } printf("\n"); } // ----------------------------------------------------------- // dumpLsraStatsCsv - dumps Lsra stats to given file in csv format. // // Arguments: // file - file to which stats are to be written. // void LinearScan::dumpLsraStatsCsv(FILE* file) { unsigned sumStats[LsraStat::COUNT] = {0}; // Write the header if the file is empty if (ftell(file) == 0) { // header fprintf(file, "\"Method Name\""); for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { fprintf(file, ",\"%s\"", LinearScan::getStatName(statIndex)); } fprintf(file, ",\"PerfScore\"\n"); } // bbNum == 0 for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { sumStats[statIndex] += blockInfo[0].stats[statIndex]; } // blocks for (BasicBlock* const block : compiler->Blocks()) { if (block->bbNum > bbNumMaxBeforeResolution) { continue; } for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { sumStats[statIndex] += blockInfo[block->bbNum].stats[statIndex]; } } fprintf(file, "\"%s\"", compiler->info.compFullName); for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { fprintf(file, ",%u", sumStats[statIndex]); } fprintf(file, ",%.2f\n", compiler->info.compPerfScore); } // ----------------------------------------------------------- // dumpLsraStatsSummary - dumps Lsra stats summary to given file // // Arguments: // file - file to which stats are to be written.
// void LinearScan::dumpLsraStatsSummary(FILE* file) { unsigned sumStats[LsraStat::STAT_FREE] = {0}; weight_t wtdStats[LsraStat::STAT_FREE] = {0.0}; // Iterate for block 0 for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++) { unsigned lsraStat = blockInfo[0].stats[statIndex]; sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * blockInfo[0].weight); } // Iterate for remaining blocks for (BasicBlock* const block : compiler->Blocks()) { if (block->bbNum > bbNumMaxBeforeResolution) { continue; } for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++) { unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex]; sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * block->bbWeight); } } for (int regSelectI = 0; regSelectI < LsraStat::STAT_FREE; regSelectI++) { fprintf(file, ", %s %u %sWt %f", getStatName(regSelectI), sumStats[regSelectI], getStatName(regSelectI), wtdStats[regSelectI]); } } #endif // TRACK_LSRA_STATS #ifdef DEBUG void dumpRegMask(regMaskTP regs) { if (regs == RBM_ALLINT) { printf("[allInt]"); } else if (regs == (RBM_ALLINT & ~RBM_FPBASE)) { printf("[allIntButFP]"); } else if (regs == RBM_ALLFLOAT) { printf("[allFloat]"); } else if (regs == RBM_ALLDOUBLE) { printf("[allDouble]"); } else { dspRegMask(regs); } } static const char* getRefTypeName(RefType refType) { switch (refType) { #define DEF_REFTYPE(memberName, memberValue, shortName) \ case memberName: \ return #memberName; #include "lsra_reftypes.h" #undef DEF_REFTYPE default: return nullptr; } } static const char* getRefTypeShortName(RefType refType) { switch (refType) { #define DEF_REFTYPE(memberName, memberValue, shortName) \ case memberName: \ return shortName; #include "lsra_reftypes.h" #undef DEF_REFTYPE default: return nullptr; } } //------------------------------------------------------------------------ // getScoreName: Returns the textual name of the register score const char* LinearScan::getScoreName(RegisterScore score) { switch (score) { #define REG_SEL_DEF(stat, value, shortname, orderSeqId) \ case stat: \ return shortname; #include "lsra_score.h" #undef REG_SEL_DEF default: return " - "; } } void RefPosition::dump(LinearScan* linearScan) { printf("<RefPosition #%-3u @%-3u", rpNum, nodeLocation); printf(" %s ", getRefTypeName(refType)); if (this->IsPhysRegRef()) { this->getReg()->tinyDump(); } else if (getInterval()) { this->getInterval()->tinyDump(); } if (this->treeNode) { printf("%s", treeNode->OpName(treeNode->OperGet())); if (this->treeNode->IsMultiRegNode()) { printf("[%d]", this->multiRegIdx); } } printf(" " FMT_BB " ", this->bbNum); printf("regmask="); dumpRegMask(registerAssignment); printf(" minReg=%d", minRegCandidateCount); if (this->lastUse) { printf(" last"); } if (this->reload) { printf(" reload"); } if (this->spillAfter) { printf(" spillAfter"); } if (this->singleDefSpill) { printf(" singleDefSpill"); } if (this->writeThru) { printf(" writeThru"); } if (this->moveReg) { printf(" move"); } if (this->copyReg) { printf(" copy"); } if (this->isFixedRegRef) { printf(" fixed"); } if (this->isLocalDefUse) { printf(" local"); } if (this->delayRegFree) { printf(" delay"); } if (this->outOfOrder) { printf(" outOfOrder"); } if (this->RegOptional()) { printf(" regOptional"); } printf(" wt=%.2f", linearScan->getWeight(this)); printf(">\n"); } void RegRecord::dump() { tinyDump(); } void Interval::dump() { printf("Interval %2u:", intervalIndex); if (isLocalVar) { printf(" (V%02u)", varNum); } else if (IsUpperVector()) { assert(relatedInterval !=
nullptr); printf(" (U%02u)", relatedInterval->varNum); } printf(" %s", varTypeName(registerType)); if (isInternal) { printf(" (INTERNAL)"); } if (isSpilled) { printf(" (SPILLED)"); } if (isSplit) { printf(" (SPLIT)"); } if (isStructField) { printf(" (field)"); } if (isPromotedStruct) { printf(" (promoted struct)"); } if (hasConflictingDefUse) { printf(" (def-use conflict)"); } if (hasInterferingUses) { printf(" (interfering uses)"); } if (isSpecialPutArg) { printf(" (specialPutArg)"); } if (isConstant) { printf(" (constant)"); } if (isWriteThru) { printf(" (writeThru)"); } printf(" RefPositions {"); for (RefPosition* refPosition = this->firstRefPosition; refPosition != nullptr; refPosition = refPosition->nextRefPosition) { printf("#%u@%u", refPosition->rpNum, refPosition->nodeLocation); if (refPosition->nextRefPosition) { printf(" "); } } printf("}"); // this is not used (yet?) // printf(" SpillOffset %d", this->spillOffset); printf(" physReg:%s", getRegName(physReg)); printf(" Preferences="); dumpRegMask(this->registerPreferences); if (relatedInterval) { printf(" RelatedInterval "); relatedInterval->microDump(); } printf("\n"); } // print out very concise representation void Interval::tinyDump() { printf("<Ivl:%u", intervalIndex); if (isLocalVar) { printf(" V%02u", varNum); } else if (IsUpperVector()) { assert(relatedInterval != nullptr); printf(" (U%02u)", relatedInterval->varNum); } else if (isInternal) { printf(" internal"); } printf("> "); } // print out extremely concise representation void Interval::microDump() { if (isLocalVar) { printf("<V%02u/L%u>", varNum, intervalIndex); return; } else if (IsUpperVector()) { assert(relatedInterval != nullptr); printf(" (U%02u)", relatedInterval->varNum); } char intervalTypeChar = 'I'; if (isInternal) { intervalTypeChar = 'T'; } printf("<%c%u>", intervalTypeChar, intervalIndex); } void RegRecord::tinyDump() { printf("<Reg:%-3s> ", getRegName(regNum)); } void LinearScan::dumpDefList() { if (!VERBOSE) { return; } JITDUMP("DefList: { "); bool first = true; for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end; listNode = listNode->Next()) { GenTree* node = listNode->treeNode; JITDUMP("%sN%03u.t%d. %s", first ? 
"" : "; ", node->gtSeqNum, node->gtTreeID, GenTree::OpName(node->OperGet())); first = false; } JITDUMP(" }\n"); } void LinearScan::lsraDumpIntervals(const char* msg) { printf("\nLinear scan intervals %s:\n", msg); for (Interval& interval : intervals) { // only dump something if it has references // if (interval->firstRefPosition) interval.dump(); } printf("\n"); } // Dumps a tree node as a destination or source operand, with the style // of dump dependent on the mode void LinearScan::lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength) { const char* lastUseChar = ""; if (tree->OperIsScalarLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0)) { lastUseChar = "*"; } switch (mode) { case LinearScan::LSRA_DUMP_PRE: case LinearScan::LSRA_DUMP_REFPOS: _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtTreeID, lastUseChar); break; case LinearScan::LSRA_DUMP_POST: { Compiler* compiler = JitTls::GetCompiler(); if (!tree->gtHasReg(compiler)) { _snprintf_s(operandString, operandStringLength, operandStringLength, "STK%s", lastUseChar); } else { int charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s", getRegName(tree->GetRegNum()), lastUseChar); operandString += charCount; operandStringLength -= charCount; if (tree->IsMultiRegNode()) { unsigned regCount = tree->GetMultiRegCount(compiler); for (unsigned regIndex = 1; regIndex < regCount; regIndex++) { charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, ",%s%s", getRegName(tree->GetRegByIndex(regIndex)), lastUseChar); operandString += charCount; operandStringLength -= charCount; } } } } break; default: printf("ERROR: INVALID TUPLE DUMP MODE\n"); break; } } void LinearScan::lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest) { Compiler* compiler = JitTls::GetCompiler(); const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1; char operandString[operandStringLength]; const char* emptyDestOperand = " "; char spillChar = ' '; if (mode == LinearScan::LSRA_DUMP_POST) { if ((tree->gtFlags & GTF_SPILL) != 0) { spillChar = 'S'; } if (!hasDest && tree->gtHasReg(compiler)) { // A node can define a register, but not produce a value for a parent to consume, // i.e. in the "localDefUse" case. // There used to be an assert here that we wouldn't spill such a node. // However, we can have unused lclVars that wind up being the node at which // it is spilled. This probably indicates a bug, but we don't realy want to // assert during a dump. if (spillChar == 'S') { spillChar = '$'; } else { spillChar = '*'; } hasDest = true; } } printf("%c N%03u. 
", spillChar, tree->gtSeqNum); LclVarDsc* varDsc = nullptr; unsigned varNum = UINT_MAX; if (tree->IsLocal()) { varNum = tree->AsLclVarCommon()->GetLclNum(); varDsc = compiler->lvaGetDesc(varNum); if (varDsc->lvLRACandidate) { hasDest = false; } } if (hasDest) { if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED) { assert(tree->gtHasReg(compiler)); } lsraGetOperandString(tree, mode, operandString, operandStringLength); printf("%-15s =", operandString); } else { printf("%-15s ", emptyDestOperand); } if (varDsc != nullptr) { if (varDsc->lvLRACandidate) { if (mode == LSRA_DUMP_REFPOS) { printf(" V%02u(L%d)", varNum, getIntervalForLocalVar(varDsc->lvVarIndex)->intervalIndex); } else { lsraGetOperandString(tree, mode, operandString, operandStringLength); printf(" V%02u(%s)", varNum, operandString); if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED) { printf("R"); } } } else { printf(" V%02u MEM", varNum); } } else if (tree->OperIs(GT_ASG)) { assert(!tree->gtHasReg(compiler)); printf(" asg%s ", GenTree::OpName(tree->OperGet())); } else { compiler->gtDispNodeName(tree); if (tree->OperKind() & GTK_LEAF) { compiler->gtDispLeaf(tree, nullptr); } } } //------------------------------------------------------------------------ // DumpOperandDefs: dumps the registers defined by a node. // // Arguments: // operand - The operand for which to compute a register count. // // Returns: // The number of registers defined by `operand`. // void LinearScan::DumpOperandDefs( GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength) { assert(operand != nullptr); assert(operandString != nullptr); if (operand->OperIs(GT_ARGPLACE)) { return; } int dstCount = ComputeOperandDstCount(operand); if (dstCount != 0) { // This operand directly produces registers; print it. if (!first) { printf(","); } lsraGetOperandString(operand, mode, operandString, operandStringLength); printf("%s", operandString); first = false; } else if (operand->isContained()) { // This is a contained node. Dump the defs produced by its operands. 
for (GenTree* op : operand->Operands()) { DumpOperandDefs(op, first, mode, operandString, operandStringLength); } } } void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) { BasicBlock* block; LsraLocation currentLoc = 1; // 0 is the entry const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1; char operandString[operandStringLength]; // currentRefPosition is not used for LSRA_DUMP_PRE // We keep separate iterators for defs, so that we can print them // on the lhs of the dump RefPositionIterator refPosIterator = refPositions.begin(); RefPosition* currentRefPosition = &refPosIterator; switch (mode) { case LSRA_DUMP_PRE: printf("TUPLE STYLE DUMP BEFORE LSRA\n"); break; case LSRA_DUMP_REFPOS: printf("TUPLE STYLE DUMP WITH REF POSITIONS\n"); break; case LSRA_DUMP_POST: printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n"); break; default: printf("ERROR: INVALID TUPLE DUMP MODE\n"); return; } if (mode != LSRA_DUMP_PRE) { printf("Incoming Parameters: "); for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB; ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = currentRefPosition->getInterval(); assert(interval != nullptr && interval->isLocalVar); printf(" V%02d", interval->varNum); if (mode == LSRA_DUMP_POST) { regNumber reg; if (currentRefPosition->registerAssignment == RBM_NONE) { reg = REG_STK; } else { reg = currentRefPosition->assignedReg(); } const LclVarDsc* varDsc = compiler->lvaGetDesc(interval->varNum); printf("("); regNumber assignedReg = varDsc->GetRegNum(); regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->GetArgReg() : REG_STK; assert(reg == assignedReg || varDsc->lvRegister == false); if (reg != argReg) { printf(getRegName(argReg)); printf("=>"); } printf("%s)", getRegName(reg)); } } printf("\n"); } for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock()) { currentLoc += 2; if (mode == LSRA_DUMP_REFPOS) { bool printedBlockHeader = false; // We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef || (currentRefPosition->refType == RefTypeBB && !printedBlockHeader)); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = nullptr; if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); } switch (currentRefPosition->refType) { case RefTypeExpUse: assert(interval != nullptr); assert(interval->isLocalVar); printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum); break; case RefTypeDummyDef: assert(interval != nullptr); assert(interval->isLocalVar); printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum); break; case RefTypeBB: block->dspBlockHeader(compiler); printedBlockHeader = true; printf("=====\n"); break; default: printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); break; } } } else { block->dspBlockHeader(compiler); printf("=====\n"); } if (enregisterLocalVars && mode == LSRA_DUMP_POST && block != compiler->fgFirstBB && block->bbNum <= bbNumMaxBeforeResolution) { printf("Predecessor for variable locations: " FMT_BB "\n", blockInfo[block->bbNum].predBBNum); dumpInVarToRegMap(block); } if (block->bbNum > bbNumMaxBeforeResolution) { SplitEdgeInfo splitEdgeInfo; splitBBNumToTargetBBNumMap->Lookup(block->bbNum, &splitEdgeInfo); assert(splitEdgeInfo.toBBNum <= 
bbNumMaxBeforeResolution); assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution); printf("New block introduced for resolution from " FMT_BB " to " FMT_BB "\n", splitEdgeInfo.fromBBNum, splitEdgeInfo.toBBNum); } for (GenTree* node : LIR::AsRange(block)) { GenTree* tree = node; int produce = tree->IsValue() ? ComputeOperandDstCount(tree) : 0; int consume = ComputeAvailableSrcCount(tree); lsraDispNode(tree, mode, produce != 0 && mode != LSRA_DUMP_REFPOS); if (mode != LSRA_DUMP_REFPOS) { if (consume > 0) { printf("; "); bool first = true; for (GenTree* operand : tree->Operands()) { DumpOperandDefs(operand, first, mode, operandString, operandStringLength); } } } else { // Print each RefPosition on a new line, but // printing all the kills for each node on a single line // and combining the fixed regs with their associated def or use bool killPrinted = false; RefPosition* lastFixedRegRefPos = nullptr; for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg || currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) && (currentRefPosition->nodeLocation == tree->gtSeqNum || currentRefPosition->nodeLocation == tree->gtSeqNum + 1); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = nullptr; if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); } switch (currentRefPosition->refType) { case RefTypeUse: if (currentRefPosition->IsPhysRegRef()) { printf("\n Use:R%d(#%d)", currentRefPosition->getReg()->regNum, currentRefPosition->rpNum); } else { assert(interval != nullptr); printf("\n Use:"); interval->microDump(); printf("(#%d)", currentRefPosition->rpNum); if (currentRefPosition->isFixedRegRef && !interval->isInternal) { assert(genMaxOneBit(currentRefPosition->registerAssignment)); assert(lastFixedRegRefPos != nullptr); printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg()), lastFixedRegRefPos->rpNum); lastFixedRegRefPos = nullptr; } if (currentRefPosition->isLocalDefUse) { printf(" LocalDefUse"); } if (currentRefPosition->lastUse) { printf(" *"); } } break; case RefTypeDef: { // Print each def on a new line assert(interval != nullptr); printf("\n Def:"); interval->microDump(); printf("(#%d)", currentRefPosition->rpNum); if (currentRefPosition->isFixedRegRef) { assert(genMaxOneBit(currentRefPosition->registerAssignment)); printf(" %s", getRegName(currentRefPosition->assignedReg())); } if (currentRefPosition->isLocalDefUse) { printf(" LocalDefUse"); } if (currentRefPosition->lastUse) { printf(" *"); } if (interval->relatedInterval != nullptr) { printf(" Pref:"); interval->relatedInterval->microDump(); } } break; case RefTypeKill: if (!killPrinted) { printf("\n Kill: "); killPrinted = true; } printf(getRegName(currentRefPosition->assignedReg())); printf(" "); break; case RefTypeFixedReg: lastFixedRegRefPos = currentRefPosition; break; default: printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); break; } } } printf("\n"); } if (enregisterLocalVars && mode == LSRA_DUMP_POST) { dumpOutVarToRegMap(block); } printf("\n"); } printf("\n\n"); } void LinearScan::dumpLsraAllocationEvent( LsraDumpEvent event, Interval* interval, regNumber reg, BasicBlock* currentBlock, RegisterScore registerScore) { if (!(VERBOSE)) { return; } if ((interval != nullptr) && (reg != REG_NA) && (reg != REG_STK)) { registersToDump |= getRegMask(reg, interval->registerType); dumpRegRecordTitleIfNeeded(); } switch 
(event) { // Conflicting def/use case LSRA_EVENT_DEFUSE_CONFLICT: dumpRefPositionShort(activeRefPosition, currentBlock); printf("DUconflict "); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE1: printf(indentFormat, " Case #1 use defRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE2: printf(indentFormat, " Case #2 use useRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE3: printf(indentFormat, " Case #3 use useRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE4: printf(indentFormat, " Case #4 use defRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE5: printf(indentFormat, " Case #5 set def to all regs"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE6: printf(indentFormat, " Case #6 need a copy"); dumpRegRecords(); if (interval == nullptr) { printf(indentFormat, " NULL interval"); dumpRegRecords(); } else if (interval->firstRefPosition->multiRegIdx != 0) { printf(indentFormat, " (multiReg)"); dumpRegRecords(); } break; case LSRA_EVENT_SPILL: dumpRefPositionShort(activeRefPosition, currentBlock); assert(interval != nullptr && interval->assignedReg != nullptr); printf("Spill %-4s ", getRegName(interval->assignedReg->regNum)); dumpRegRecords(); break; // Restoring the previous register case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL: case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL: assert(interval != nullptr); if ((activeRefPosition == nullptr) || (activeRefPosition->refType == RefTypeBB)) { printf(emptyRefPositionFormat, ""); } else { dumpRefPositionShort(activeRefPosition, currentBlock); } printf((event == LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL) ? "Restr %-4s " : "SRstr %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_DONE_KILL_GC_REFS: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Done "); break; case LSRA_EVENT_NO_GC_KILLS: dumpRefPositionShort(activeRefPosition, currentBlock); printf("None "); break; // Block boundaries case LSRA_EVENT_START_BB: // The RefTypeBB comes after the RefTypeDummyDefs associated with that block, // so we may have a RefTypeDummyDef at the time we dump this event. // In that case we'll have another "EVENT" associated with it, so we need to // print the full line now.
if (activeRefPosition->refType != RefTypeBB) { dumpNewBlock(currentBlock, activeRefPosition->nodeLocation); dumpRegRecords(); } else { dumpRefPositionShort(activeRefPosition, currentBlock); } break; // Allocation decisions case LSRA_EVENT_NEEDS_NEW_REG: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Free %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_ZERO_REF: assert(interval != nullptr && interval->isLocalVar); dumpRefPositionShort(activeRefPosition, currentBlock); printf("NoRef "); dumpRegRecords(); break; case LSRA_EVENT_FIXED_REG: case LSRA_EVENT_EXP_USE: case LSRA_EVENT_KEPT_ALLOCATION: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Keep %-4s ", getRegName(reg)); break; case LSRA_EVENT_COPY_REG: assert(interval != nullptr && interval->recentRefPosition != nullptr); dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Copy %-4s ", getRegName(reg)); } else { printf("%-5s(C) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_MOVE_REG: assert(interval != nullptr && interval->recentRefPosition != nullptr); dumpRefPositionShort(activeRefPosition, currentBlock); printf("Move %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_ALLOC_REG: dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Alloc %-4s ", getRegName(reg)); } else { printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_REUSE_REG: dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Reuse %-4s ", getRegName(reg)); } else { printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED: assert(interval != nullptr && interval->isLocalVar); dumpRefPositionShort(activeRefPosition, currentBlock); printf("LoRef "); break; case LSRA_EVENT_NO_REG_ALLOCATED: dumpRefPositionShort(activeRefPosition, currentBlock); printf("NoReg "); break; case LSRA_EVENT_RELOAD: dumpRefPositionShort(activeRefPosition, currentBlock); printf("ReLod %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_SPECIAL_PUTARG: dumpRefPositionShort(activeRefPosition, currentBlock); printf("PtArg %-4s ", getRegName(reg)); break; case LSRA_EVENT_UPPER_VECTOR_SAVE: dumpRefPositionShort(activeRefPosition, currentBlock); printf("UVSav %-4s ", getRegName(reg)); break; case LSRA_EVENT_UPPER_VECTOR_RESTORE: dumpRefPositionShort(activeRefPosition, currentBlock); printf("UVRes %-4s ", getRegName(reg)); break; // We currently don't dump anything for these events. case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE: case LSRA_EVENT_SPILL_EXTENDED_LIFETIME: case LSRA_EVENT_END_BB: case LSRA_EVENT_FREE_REGS: case LSRA_EVENT_INCREMENT_RANGE_END: case LSRA_EVENT_LAST_USE: case LSRA_EVENT_LAST_USE_DELAYED: break; default: printf("????? %-4s ", getRegName(reg)); dumpRegRecords(); break; } } //------------------------------------------------------------------------ // dumpRegRecordHeader: Dump the header for a column-based dump of the register state. // // Arguments: // None. // // Return Value: // None. // // Assumptions: // Reg names fit in 4 characters (minimum width of the columns) // // Notes: // In order to make the table as dense as possible (for ease of reading the dumps), // we determine the minimum regColumnWidth width required to represent: // regs, by name (e.g. 
eax or xmm0) - this is fixed at 4 characters. // intervals, as Vnn for lclVar intervals, or as I<num> for other intervals. // The table is indented by the amount needed for dumpRefPositionShort, which is // captured in shortRefPositionDumpWidth. // void LinearScan::dumpRegRecordHeader() { printf("The following table has one or more rows for each RefPosition that is handled during allocation.\n" "The first column provides the basic information about the RefPosition, with its type (e.g. Def,\n" "Use, Fixd) followed by a '*' if it is a last use, and a 'D' if it is delayRegFree, and then the\n" "action taken during allocation (e.g. Alloc a new register, or Keep an existing one).\n" "The subsequent columns show the Interval occupying each register, if any, followed by 'a' if it is\n" "active, a 'p' if it is a large vector that has been partially spilled, and 'i' if it is inactive.\n" "Columns are only printed up to the last modified register, which may increase during allocation,\n" "in which case additional columns will appear. \n" "Registers which are not marked modified have ---- in their column.\n\n"); // First, determine the width of each register column (which holds a reg name in the // header, and an interval name in each subsequent row). int intervalNumberWidth = (int)log10((double)intervals.size()) + 1; // The regColumnWidth includes the identifying character (I or V) and an 'i', 'p' or 'a' (inactive, // partially-spilled or active) regColumnWidth = intervalNumberWidth + 2; if (regColumnWidth < 4) { regColumnWidth = 4; } sprintf_s(intervalNameFormat, MAX_FORMAT_CHARS, "%%c%%-%dd", regColumnWidth - 2); sprintf_s(regNameFormat, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth); // Next, determine the width of the short RefPosition (see dumpRefPositionShort()). // This is in the form: // nnn.#mmm NAME TYPEld // Where: // nnn is the Location, right-justified to the width needed for the highest location. // mmm is the RefPosition rpNum, left-justified to the width needed for the highest rpNum. // NAME is dumped by dumpReferentName(), and is "regColumnWidth". // TYPE is RefTypeNameShort, and is 4 characters // l is either '*' (if a last use) or ' ' (otherwise) // d is either 'D' (if a delayed use) or ' ' (otherwise) maxNodeLocation = (maxNodeLocation == 0) ? 1 : maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes assert(maxNodeLocation >= 1); assert(refPositions.size() >= 1); int nodeLocationWidth = (int)log10((double)maxNodeLocation) + 1; int refPositionWidth = (int)log10((double)refPositions.size()) + 1; int refTypeInfoWidth = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */; int locationAndRPNumWidth = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */; int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth; sprintf_s(shortRefPositionFormat, MAX_FORMAT_CHARS, "%%%dd.#%%-%dd ", nodeLocationWidth, refPositionWidth); sprintf_s(emptyRefPositionFormat, MAX_FORMAT_CHARS, "%%-%ds", shortRefPositionDumpWidth); // The width of the "allocation info" // - an 8-character allocation decision // - a space // - a 4-character register // - a space int allocationInfoWidth = 8 + 1 + 4 + 1; // Next, determine the width of the legend for each row.
This includes: // - a short RefPosition dump (shortRefPositionDumpWidth), which includes a space // - the allocation info (allocationInfoWidth), which also includes a space regTableIndent = shortRefPositionDumpWidth + allocationInfoWidth; // BBnn printed left-justified in the NAME Typeld and allocationInfo space. int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1; // In the unlikely event that BB numbers overflow the space, we'll simply omit the predBB int predBBNumDumpSpace = regTableIndent - locationAndRPNumWidth - bbNumWidth - 9; // 'BB' + ' PredBB' if (predBBNumDumpSpace < bbNumWidth) { sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd", shortRefPositionDumpWidth - 2); } else { sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd PredBB%%-%dd", bbNumWidth, predBBNumDumpSpace); } if (compiler->shouldDumpASCIITrees()) { columnSeparator = "|"; line = "-"; leftBox = "+"; middleBox = "+"; rightBox = "+"; } else { columnSeparator = "\xe2\x94\x82"; line = "\xe2\x94\x80"; leftBox = "\xe2\x94\x9c"; middleBox = "\xe2\x94\xbc"; rightBox = "\xe2\x94\xa4"; } sprintf_s(indentFormat, MAX_FORMAT_CHARS, "%%-%ds", regTableIndent); // Now, set up the legend format for the RefPosition info sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth + 1, nodeLocationWidth + 1, refPositionWidth + 2, refPositionWidth + 2, regColumnWidth + 1); // Print a "title row" including the legend and the reg names. lastDumpedRegisters = RBM_NONE; dumpRegRecordTitleIfNeeded(); } void LinearScan::dumpRegRecordTitleIfNeeded() { if ((lastDumpedRegisters != registersToDump) || (rowCountSinceLastTitle > MAX_ROWS_BETWEEN_TITLES)) { lastUsedRegNumIndex = 0; int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST; for (int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++) { if ((registersToDump & genRegMask((regNumber)regNumIndex)) != 0) { lastUsedRegNumIndex = regNumIndex; } } dumpRegRecordTitle(); lastDumpedRegisters = registersToDump; } } void LinearScan::dumpRegRecordTitleLines() { for (int i = 0; i < regTableIndent; i++) { printf("%s", line); } for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++) { regNumber regNum = (regNumber)regNumIndex; if (shouldDumpReg(regNum)) { printf("%s", middleBox); for (int i = 0; i < regColumnWidth; i++) { printf("%s", line); } } } printf("%s\n", rightBox); } void LinearScan::dumpRegRecordTitle() { dumpRegRecordTitleLines(); // Print out the legend for the RefPosition info printf(legendFormat, "Loc ", "RP# ", "Name ", "Type Action Reg "); // Print out the register name column headers char columnFormatArray[MAX_FORMAT_CHARS]; sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%s%%-%d.%ds", columnSeparator, regColumnWidth, regColumnWidth); for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++) { regNumber regNum = (regNumber)regNumIndex; if (shouldDumpReg(regNum)) { const char* regName = getRegName(regNum); printf(columnFormatArray, regName); } } printf("%s\n", columnSeparator); rowCountSinceLastTitle = 0; dumpRegRecordTitleLines(); } void LinearScan::dumpRegRecords() { static char columnFormatArray[18]; for (regNumber regNum = REG_FIRST; regNum <= (regNumber)lastUsedRegNumIndex; regNum = REG_NEXT(regNum)) { if (shouldDumpReg(regNum)) { printf("%s", columnSeparator); RegRecord& regRecord = physRegs[regNum]; Interval* interval = regRecord.assignedInterval; if (interval != nullptr) { dumpIntervalName(interval); char activeChar = interval->isActive 
? 'a' : 'i'; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (interval->isPartiallySpilled) { activeChar = 'p'; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE printf("%c", activeChar); } else if ((genRegMask(regNum) & regsBusyUntilKill) != RBM_NONE) { printf(columnFormatArray, "Busy"); } else { sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth); printf(columnFormatArray, ""); } } } printf("%s\n", columnSeparator); rowCountSinceLastTitle++; } void LinearScan::dumpIntervalName(Interval* interval) { if (interval->isLocalVar) { printf(intervalNameFormat, 'V', interval->varNum); } else if (interval->IsUpperVector()) { printf(intervalNameFormat, 'U', interval->relatedInterval->varNum); } else if (interval->isConstant) { printf(intervalNameFormat, 'C', interval->intervalIndex); } else { printf(intervalNameFormat, 'I', interval->intervalIndex); } } void LinearScan::dumpEmptyRefPosition() { printf(emptyRefPositionFormat, ""); } //------------------------------------------------------------------------ // dumpNewBlock: Dump a line for a new block in a column-based dump of the register state. // // Arguments: // currentBlock - the new block to be dumped // void LinearScan::dumpNewBlock(BasicBlock* currentBlock, LsraLocation location) { if (!VERBOSE) { return; } // Always print a title row before a RefTypeBB (except for the first, because we // will already have printed it before the parameters) if ((currentBlock != compiler->fgFirstBB) && (currentBlock != nullptr)) { dumpRegRecordTitle(); } // If the activeRefPosition is a DummyDef, then don't print anything further (printing the // title line makes it clearer that we're "about to" start the next block). if (activeRefPosition->refType == RefTypeDummyDef) { dumpEmptyRefPosition(); printf("DDefs "); printf(regNameFormat, ""); return; } printf(shortRefPositionFormat, location, activeRefPosition->rpNum); if (currentBlock == nullptr) { printf(regNameFormat, "END"); printf(" "); printf(regNameFormat, ""); } else { printf(bbRefPosFormat, currentBlock->bbNum, currentBlock == compiler->fgFirstBB ? 0 : blockInfo[currentBlock->bbNum].predBBNum); } } // Note that the size of this dump is computed in dumpRegRecordHeader(). // void LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock) { static RefPosition* lastPrintedRefPosition = nullptr; if (refPosition == lastPrintedRefPosition) { dumpEmptyRefPosition(); return; } lastPrintedRefPosition = refPosition; if (refPosition->refType == RefTypeBB) { dumpNewBlock(currentBlock, refPosition->nodeLocation); return; } printf(shortRefPositionFormat, refPosition->nodeLocation, refPosition->rpNum); if (refPosition->isIntervalRef()) { Interval* interval = refPosition->getInterval(); dumpIntervalName(interval); char lastUseChar = ' '; char delayChar = ' '; if (refPosition->lastUse) { lastUseChar = '*'; if (refPosition->delayRegFree) { delayChar = 'D'; } } printf(" %s%c%c ", getRefTypeShortName(refPosition->refType), lastUseChar, delayChar); } else if (refPosition->IsPhysRegRef()) { RegRecord* regRecord = refPosition->getReg(); printf(regNameFormat, getRegName(regRecord->regNum)); printf(" %s ", getRefTypeShortName(refPosition->refType)); } else { assert(refPosition->refType == RefTypeKillGCRefs); // There's no interval or reg name associated with this. 
printf(regNameFormat, " "); printf(" %s ", getRefTypeShortName(refPosition->refType)); } } //------------------------------------------------------------------------ // LinearScan::IsResolutionMove: // Returns true if the given node is a move inserted by LSRA // resolution. // // Arguments: // node - the node to check. // bool LinearScan::IsResolutionMove(GenTree* node) { if (!IsLsraAdded(node)) { return false; } switch (node->OperGet()) { case GT_LCL_VAR: case GT_COPY: return node->IsUnusedValue(); case GT_SWAP: return true; default: return false; } } //------------------------------------------------------------------------ // LinearScan::IsResolutionNode: // Returns true if the given node is either a move inserted by LSRA // resolution or an operand to such a move. // // Arguments: // containingRange - the range that contains the node to check. // node - the node to check. // bool LinearScan::IsResolutionNode(LIR::Range& containingRange, GenTree* node) { for (;;) { if (IsResolutionMove(node)) { return true; } if (!IsLsraAdded(node) || (node->OperGet() != GT_LCL_VAR)) { return false; } LIR::Use use; bool foundUse = containingRange.TryGetUse(node, &use); assert(foundUse); node = use.User(); } } //------------------------------------------------------------------------ // verifyFinalAllocation: Traverse the RefPositions and verify various invariants. // // Arguments: // None. // // Return Value: // None. // // Notes: // If verbose is set, this will also dump a table of the final allocations. void LinearScan::verifyFinalAllocation() { if (VERBOSE) { printf("\nFinal allocation\n"); } // Clear register assignments. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } for (Interval& interval : intervals) { interval.assignedReg = nullptr; interval.physReg = REG_NA; } DBEXEC(VERBOSE, dumpRegRecordTitle()); BasicBlock* currentBlock = nullptr; GenTree* firstBlockEndResolutionNode = nullptr; LsraLocation currentLocation = MinLocation; for (RefPosition& refPosition : refPositions) { RefPosition* currentRefPosition = &refPosition; Interval* interval = nullptr; RegRecord* regRecord = nullptr; regNumber regNum = REG_NA; activeRefPosition = currentRefPosition; if (currentRefPosition->refType != RefTypeBB) { if (currentRefPosition->IsPhysRegRef()) { regRecord = currentRefPosition->getReg(); regRecord->recentRefPosition = currentRefPosition; regNum = regRecord->regNum; } else if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); interval->recentRefPosition = currentRefPosition; if (currentRefPosition->registerAssignment != RBM_NONE) { if (!genMaxOneBit(currentRefPosition->registerAssignment)) { assert(currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); } else { regNum = currentRefPosition->assignedReg(); regRecord = getRegisterRecord(regNum); } } } } LsraLocation newLocation = currentRefPosition->nodeLocation; currentLocation = newLocation; switch (currentRefPosition->refType) { case RefTypeBB: { if (currentBlock == nullptr) { currentBlock = startBlockSequence(); } else { // Verify the resolution moves at the end of the previous block. for (GenTree* node = firstBlockEndResolutionNode; node != nullptr; node = node->gtNext) { assert(enregisterLocalVars); // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. 
if (IsResolutionMove(node)) { verifyResolutionMove(node, currentLocation); } } // Validate the locations at the end of the previous block. if (enregisterLocalVars) { VarToRegMap outVarToRegMap = outVarToRegMaps[currentBlock->bbNum]; VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(outVarToRegMap, varIndex); interval = getIntervalForLocalVar(varIndex); if (interval->physReg != regNum) { assert(regNum == REG_STK); assert((interval->physReg == REG_NA) || interval->isWriteThru); } interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } } // Clear register assignments. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } // Now, record the locations at the beginning of this block. currentBlock = moveToNextBlock(); } if (currentBlock != nullptr) { if (enregisterLocalVars) { VarToRegMap inVarToRegMap = inVarToRegMaps[currentBlock->bbNum]; VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(inVarToRegMap, varIndex); interval = getIntervalForLocalVar(varIndex); interval->physReg = regNum; interval->assignedReg = &(physRegs[regNum]); interval->isActive = true; physRegs[regNum].assignedInterval = interval; } } if (VERBOSE) { dumpRefPositionShort(currentRefPosition, currentBlock); dumpRegRecords(); } // Finally, handle the resolution moves, if any, at the beginning of the next block. firstBlockEndResolutionNode = nullptr; bool foundNonResolutionNode = false; LIR::Range& currentBlockRange = LIR::AsRange(currentBlock); for (GenTree* node : currentBlockRange) { if (IsResolutionNode(currentBlockRange, node)) { assert(enregisterLocalVars); if (foundNonResolutionNode) { firstBlockEndResolutionNode = node; break; } else if (IsResolutionMove(node)) { // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. 
verifyResolutionMove(node, currentLocation); } } else { foundNonResolutionNode = true; } } } } break; case RefTypeKill: assert(regRecord != nullptr); assert(regRecord->assignedInterval == nullptr); dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); break; case RefTypeFixedReg: assert(regRecord != nullptr); dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); break; case RefTypeUpperVectorSave: dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_SAVE, nullptr, REG_NA, currentBlock); break; case RefTypeUpperVectorRestore: dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_RESTORE, nullptr, REG_NA, currentBlock); break; case RefTypeDef: case RefTypeUse: case RefTypeParamDef: case RefTypeZeroInit: assert(interval != nullptr); if (interval->isSpecialPutArg) { dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum); break; } if (currentRefPosition->reload) { interval->isActive = true; assert(regNum != REG_NA); interval->physReg = regNum; interval->assignedReg = regRecord; regRecord->assignedInterval = interval; dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock); } if (regNum == REG_NA) { // If this interval is still assigned to a register if (interval->physReg != REG_NA) { // then unassign it if no new register was assigned to the RefTypeDef if (RefTypeIsDef(currentRefPosition->refType)) { assert(interval->assignedReg != nullptr); if (interval->assignedReg->assignedInterval == interval) { interval->assignedReg->assignedInterval = nullptr; } interval->physReg = REG_NA; interval->assignedReg = nullptr; } } dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval); } else if (RefTypeIsDef(currentRefPosition->refType)) { interval->isActive = true; if (VERBOSE) { if (interval->isConstant && (currentRefPosition->treeNode != nullptr) && currentRefPosition->treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock); } else { dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock); } } } else if (currentRefPosition->copyReg) { dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock); } else if (currentRefPosition->moveReg) { assert(interval->assignedReg != nullptr); interval->assignedReg->assignedInterval = nullptr; interval->physReg = regNum; interval->assignedReg = regRecord; regRecord->assignedInterval = interval; if (VERBOSE) { dumpEmptyRefPosition(); printf("Move %-4s ", getRegName(regRecord->regNum)); } } else { dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); } if (currentRefPosition->lastUse || (currentRefPosition->spillAfter && !currentRefPosition->writeThru)) { interval->isActive = false; } if (regNum != REG_NA) { if (currentRefPosition->spillAfter) { if (VERBOSE) { // If refPos is marked as copyReg, then the reg that is spilled // is the homeReg of the interval not the reg currently assigned // to refPos. regNumber spillReg = regNum; if (currentRefPosition->copyReg) { assert(interval != nullptr); spillReg = interval->physReg; } dumpRegRecords(); dumpEmptyRefPosition(); if (currentRefPosition->writeThru) { printf("WThru %-4s ", getRegName(spillReg)); } else { printf("Spill %-4s ", getRegName(spillReg)); } } } else if (currentRefPosition->copyReg) { regRecord->assignedInterval = interval; } else { if (RefTypeIsDef(currentRefPosition->refType)) { // Interval was assigned to a different register. 
// Clear the assigned interval of current register. if (interval->physReg != REG_NA && interval->physReg != regNum) { interval->assignedReg->assignedInterval = nullptr; } } interval->physReg = regNum; interval->assignedReg = regRecord; regRecord->assignedInterval = interval; } } break; case RefTypeKillGCRefs: // No action to take. // However, we will assert that, at resolution time, no registers contain GC refs. { DBEXEC(VERBOSE, printf(" ")); regMaskTP candidateRegs = currentRefPosition->registerAssignment; while (candidateRegs != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(candidateRegs); candidateRegs &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); Interval* assignedInterval = regRecord->assignedInterval; assert(assignedInterval == nullptr || !varTypeIsGC(assignedInterval->registerType)); } } break; case RefTypeExpUse: case RefTypeDummyDef: // Do nothing; these will be handled by the RefTypeBB. DBEXEC(VERBOSE, dumpRefPositionShort(currentRefPosition, currentBlock)); DBEXEC(VERBOSE, printf(" ")); break; case RefTypeInvalid: // for these 'currentRefPosition->refType' values, No action to take break; } if (currentRefPosition->refType != RefTypeBB) { DBEXEC(VERBOSE, dumpRegRecords()); if (interval != nullptr) { if (currentRefPosition->copyReg) { assert(interval->physReg != regNum); regRecord->assignedInterval = nullptr; assert(interval->assignedReg != nullptr); regRecord = interval->assignedReg; } if (currentRefPosition->spillAfter || currentRefPosition->lastUse) { assert(!currentRefPosition->spillAfter || currentRefPosition->IsActualRef()); if (RefTypeIsDef(currentRefPosition->refType)) { // If an interval got assigned to a different register (while the different // register got spilled), then clear the assigned interval of current register. if (interval->physReg != REG_NA && interval->physReg != regNum) { interval->assignedReg->assignedInterval = nullptr; } } interval->physReg = REG_NA; interval->assignedReg = nullptr; // regRegcord could be null if the RefPosition does not require a register. if (regRecord != nullptr) { regRecord->assignedInterval = nullptr; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE else if (interval->isUpperVector && !currentRefPosition->RegOptional()) { // These only require a register if they are not RegOptional, and their lclVar // interval is living in a register and not already partially spilled. if ((currentRefPosition->refType == RefTypeUpperVectorSave) || (currentRefPosition->refType == RefTypeUpperVectorRestore)) { Interval* lclVarInterval = interval->relatedInterval; assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled); } } #endif else { assert(currentRefPosition->RegOptional()); } } } } } // Now, verify the resolution blocks. // Currently these are nearly always at the end of the method, but that may not always be the case. // So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution. for (BasicBlock* const currentBlock : compiler->Blocks()) { if (currentBlock->bbNum > bbNumMaxBeforeResolution) { // If we haven't enregistered an lclVars, we have no resolution blocks. assert(enregisterLocalVars); if (VERBOSE) { dumpRegRecordTitle(); printf(shortRefPositionFormat, 0, 0); assert(currentBlock->bbPreds != nullptr && currentBlock->bbPreds->getBlock() != nullptr); printf(bbRefPosFormat, currentBlock->bbNum, currentBlock->bbPreds->getBlock()->bbNum); dumpRegRecords(); } // Clear register assignments. 
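            // As in the main pass above, each resolution block is verified starting from a clean
            // register state.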
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } // Set the incoming register assignments VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum); VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(inVarToRegMap, varIndex); Interval* interval = getIntervalForLocalVar(varIndex); interval->physReg = regNum; interval->assignedReg = &(physRegs[regNum]); interval->isActive = true; physRegs[regNum].assignedInterval = interval; } // Verify the moves in this block LIR::Range& currentBlockRange = LIR::AsRange(currentBlock); for (GenTree* node : currentBlockRange) { assert(IsResolutionNode(currentBlockRange, node)); if (IsResolutionMove(node)) { // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. verifyResolutionMove(node, currentLocation); } } // Verify the outgoing register assignments { VarToRegMap outVarToRegMap = getOutVarToRegMap(currentBlock->bbNum); VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(outVarToRegMap, varIndex); Interval* interval = getIntervalForLocalVar(varIndex); // Either the register assignments match, or the outgoing assignment is on the stack // and this is a write-thru interval. assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK) || (interval->isWriteThru && regNum == REG_STK)); interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } } } } DBEXEC(VERBOSE, printf("\n")); } //------------------------------------------------------------------------ // verifyResolutionMove: Verify a resolution statement. Called by verifyFinalAllocation() // // Arguments: // resolutionMove - A GenTree* that must be a resolution move. // currentLocation - The LsraLocation of the most recent RefPosition that has been verified. // // Return Value: // None. // // Notes: // If verbose is set, this will also dump the moves into the table of final allocations. 
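// A resolution move takes one of three shapes (see IsResolutionMove above):
//   GT_SWAP              - exchange the registers of two enregistered lclVars;
//   GT_COPY(GT_LCL_VAR)  - move a lclVar from its current register to another;
//   GT_LCL_VAR (unused)  - reload (GTF_SPILLED) from, or spill (GTF_SPILL) to, the stack.
//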
void LinearScan::verifyResolutionMove(GenTree* resolutionMove, LsraLocation currentLocation) { GenTree* dst = resolutionMove; assert(IsResolutionMove(dst)); if (dst->OperGet() == GT_SWAP) { GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon(); GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon(); regNumber leftRegNum = left->GetRegNum(); regNumber rightRegNum = right->GetRegNum(); LclVarDsc* leftVarDsc = compiler->lvaGetDesc(left); LclVarDsc* rightVarDsc = compiler->lvaGetDesc(right); Interval* leftInterval = getIntervalForLocalVar(leftVarDsc->lvVarIndex); Interval* rightInterval = getIntervalForLocalVar(rightVarDsc->lvVarIndex); assert(leftInterval->physReg == leftRegNum && rightInterval->physReg == rightRegNum); leftInterval->physReg = rightRegNum; rightInterval->physReg = leftRegNum; leftInterval->assignedReg = &physRegs[rightRegNum]; rightInterval->assignedReg = &physRegs[leftRegNum]; physRegs[rightRegNum].assignedInterval = leftInterval; physRegs[leftRegNum].assignedInterval = rightInterval; if (VERBOSE) { printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(leftInterval); printf(" Swap "); printf(" %-4s ", getRegName(rightRegNum)); dumpRegRecords(); printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(rightInterval); printf(" \" "); printf(" %-4s ", getRegName(leftRegNum)); dumpRegRecords(); } return; } regNumber dstRegNum = dst->GetRegNum(); regNumber srcRegNum; GenTreeLclVarCommon* lcl; if (dst->OperGet() == GT_COPY) { lcl = dst->gtGetOp1()->AsLclVarCommon(); srcRegNum = lcl->GetRegNum(); } else { lcl = dst->AsLclVarCommon(); if ((lcl->gtFlags & GTF_SPILLED) != 0) { srcRegNum = REG_STK; } else { assert((lcl->gtFlags & GTF_SPILL) != 0); srcRegNum = dstRegNum; dstRegNum = REG_STK; } } Interval* interval = getIntervalForLocalVarNode(lcl); assert(interval->physReg == srcRegNum || (srcRegNum == REG_STK && interval->physReg == REG_NA)); if (srcRegNum != REG_STK) { physRegs[srcRegNum].assignedInterval = nullptr; } if (dstRegNum != REG_STK) { interval->physReg = dstRegNum; interval->assignedReg = &(physRegs[dstRegNum]); physRegs[dstRegNum].assignedInterval = interval; interval->isActive = true; } else { interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } if (VERBOSE) { printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(interval); printf(" Move "); printf(" %-4s ", getRegName(dstRegNum)); dumpRegRecords(); } } #endif // DEBUG LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan) { this->linearScan = linearScan; #ifdef DEBUG mappingTable = new ScoreMappingTable(linearScan->compiler->getAllocator(CMK_LSRA)); #define REG_SEL_DEF(stat, value, shortname, orderSeqId) \ mappingTable->Set(stat, &LinearScan::RegisterSelection::try_##stat); #include "lsra_score.h" #undef REG_SEL_DEF LPCWSTR ordering = JitConfig.JitLsraOrdering(); if (ordering == nullptr) { ordering = W("ABCDEFGHIJKLMNOPQ"); } for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++) { // Make sure we do not set repeated entries assert(RegSelectionOrder[orderId] == NONE); switch (ordering[orderId]) { #define REG_SEL_DEF(enum_name, value, shortname, orderSeqId) \ case orderSeqId: \ RegSelectionOrder[orderId] = enum_name; \ break; #include "lsra_score.h" #undef REG_SEL_DEF default: assert(!"Invalid lsraOrdering value."); } } #endif // DEBUG } // ---------------------------------------------------------- // reset: Resets the values of all the fields used for register selection. 
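// Note: select() calls this first for every (interval, refPosition) pair; all of the cached
// sets (coversSet, preferenceSet, etc.) are invalidated here and lazily recomputed by
// calculateCoversSets(). A rough sketch of the call pattern (illustrative only):
//
//   reset(currentInterval, refPosition);   // capture candidates and preferences
//   // ... then apply the try_* heuristics in order until 'found' is true ...
//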
// void LinearScan::RegisterSelection::reset(Interval* interval, RefPosition* refPos) { currentInterval = interval; refPosition = refPos; score = 0; regType = linearScan->getRegisterType(currentInterval, refPosition); currentLocation = refPosition->nodeLocation; nextRefPos = refPosition->nextRefPosition; candidates = refPosition->registerAssignment; preferences = currentInterval->registerPreferences; // This is not actually a preference, it's merely to track the lclVar that this // "specialPutArg" is using. relatedInterval = currentInterval->isSpecialPutArg ? nullptr : currentInterval->relatedInterval; relatedPreferences = (relatedInterval == nullptr) ? RBM_NONE : relatedInterval->getCurrentPreferences(); rangeEndLocation = refPosition->getRangeEndLocation(); relatedLastLocation = rangeEndLocation; preferCalleeSave = currentInterval->preferCalleeSave; rangeEndRefPosition = nullptr; lastRefPosition = currentInterval->lastRefPosition; lastLocation = MinLocation; prevRegRec = currentInterval->assignedReg; // These are used in the post-selection updates, and must be set for any selection. freeCandidates = RBM_NONE; matchingConstants = RBM_NONE; unassignedSet = RBM_NONE; coversSet = RBM_NONE; preferenceSet = RBM_NONE; coversRelatedSet = RBM_NONE; coversFullSet = RBM_NONE; foundRegBit = REG_NA; found = false; skipAllocation = false; coversSetsCalculated = false; } // ---------------------------------------------------------- // applySelection: Apply the heuristic to the candidates. // // Arguments: // selectionScore: The score corresponding to the heuristics we apply. // selectionCandidates: The possible candidates for the heuristic to apply. // // Return Values: // 'true' if there was a single register candidate available after the heuristic is applied. // bool LinearScan::RegisterSelection::applySelection(int selectionScore, regMaskTP selectionCandidates) { regMaskTP newCandidates = candidates & selectionCandidates; if (newCandidates != RBM_NONE) { score += selectionScore; candidates = newCandidates; return LinearScan::isSingleRegister(candidates); } return false; } // ---------------------------------------------------------- // applySingleRegSelection: Select a single register, if it is in the candidate set. // // Arguments: // selectionScore: The score corresponding to the heuristics we apply. // selectionCandidates: The possible candidates for the heuristic to apply. // // Return Values: // 'true' if there was a single register candidate available after the heuristic is applied. // bool LinearScan::RegisterSelection::applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate) { assert(LinearScan::isSingleRegister(selectionCandidate)); regMaskTP newCandidates = candidates & selectionCandidate; if (newCandidates != RBM_NONE) { candidates = newCandidates; return true; } return false; } // ---------------------------------------------------------- // try_FREE: Apply the FREE heuristic. // void LinearScan::RegisterSelection::try_FREE() { assert(!found); if (freeCandidates == RBM_NONE) { return; } found = applySelection(FREE, freeCandidates); } // ---------------------------------------------------------- // try_CONST_AVAILABLE: Apply the CONST_AVAILABLE (matching constant) heuristic. // // Note: we always need to define the 'matchingConstants' set. 
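// ('matchingConstants' is populated up front in select(), via getMatchingConstants(), whenever
// the current interval is a constant being defined and there are free candidates.)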
// void LinearScan::RegisterSelection::try_CONST_AVAILABLE() { assert(!found); if (freeCandidates == RBM_NONE) { return; } if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType)) { found = applySelection(CONST_AVAILABLE, matchingConstants); } } // ---------------------------------------------------------- // try_THIS_ASSIGNED: Apply the THIS_ASSIGNED heuristic. // void LinearScan::RegisterSelection::try_THIS_ASSIGNED() { assert(!found); if (freeCandidates == RBM_NONE) { return; } if (prevRegRec != nullptr) { found = applySelection(THIS_ASSIGNED, freeCandidates & preferences & prevRegBit); } } // ---------------------------------------------------------- // try_COVERS: Apply the COVERS heuristic. // void LinearScan::RegisterSelection::try_COVERS() { assert(!found); calculateCoversSets(); found = applySelection(COVERS, coversSet & preferenceSet); } // ---------------------------------------------------------- // try_OWN_PREFERENCE: Apply the OWN_PREFERENCE heuristic. // // Note: 'preferenceSet' already includes only freeCandidates. // void LinearScan::RegisterSelection::try_OWN_PREFERENCE() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(OWN_PREFERENCE, (preferenceSet & freeCandidates)); } // ---------------------------------------------------------- // try_COVERS_RELATED: Apply the COVERS_RELATED heuristic. // void LinearScan::RegisterSelection::try_COVERS_RELATED() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(COVERS_RELATED, (coversRelatedSet & freeCandidates)); } // ---------------------------------------------------------- // try_RELATED_PREFERENCE: Apply the RELATED_PREFERENCE heuristic. // void LinearScan::RegisterSelection::try_RELATED_PREFERENCE() { assert(!found); found = applySelection(RELATED_PREFERENCE, relatedPreferences & freeCandidates); } // ---------------------------------------------------------- // try_CALLER_CALLEE: Apply the CALLER_CALLEE heuristic. // void LinearScan::RegisterSelection::try_CALLER_CALLEE() { assert(!found); found = applySelection(CALLER_CALLEE, callerCalleePrefs & freeCandidates); } // ---------------------------------------------------------- // try_UNASSIGNED: Apply the UNASSIGNED heuristic. // void LinearScan::RegisterSelection::try_UNASSIGNED() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(UNASSIGNED, unassignedSet); } // ---------------------------------------------------------- // try_COVERS_FULL: Apply the COVERS_FULL heuristic. // void LinearScan::RegisterSelection::try_COVERS_FULL() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(COVERS_FULL, (coversFullSet & freeCandidates)); } // ---------------------------------------------------------- // try_BEST_FIT: Apply the BEST_FIT heuristic. // void LinearScan::RegisterSelection::try_BEST_FIT() { assert(!found); if (freeCandidates == RBM_NONE) { return; } regMaskTP bestFitSet = RBM_NONE; // If the best score includes COVERS_FULL, pick the one that's killed soonest. // If none cover the full range, the BEST_FIT is the one that's killed later. bool earliestIsBest = ((score & COVERS_FULL) != 0); LsraLocation bestFitLocation = earliestIsBest ? 
MaxLocation : MinLocation;

    for (regMaskTP bestFitCandidates = candidates; bestFitCandidates != RBM_NONE;)
    {
        regMaskTP bestFitCandidateBit = genFindLowestBit(bestFitCandidates);
        bestFitCandidates &= ~bestFitCandidateBit;
        regNumber bestFitCandidateRegNum = genRegNumFromMask(bestFitCandidateBit);

        // Find the next RefPosition of the register.
        LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(bestFitCandidateRegNum, regType);
        LsraLocation nextPhysRefLocation  = linearScan->getNextFixedRef(bestFitCandidateRegNum, regType);
        nextPhysRefLocation               = Min(nextPhysRefLocation, nextIntervalLocation);

        // If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
        // we don't think it isn't covering the live range.
        // This doesn't handle the case where earlier RefPositions for this Interval are also
        // FixedRefs of this regNum, but at least those are only interesting in the case where those
        // are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
        // TODO: This duplicates code in an earlier loop, and is basically here to duplicate previous
        // behavior; see if we can avoid this.
        if (nextPhysRefLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(bestFitCandidateRegNum))
        {
            INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval));
            nextPhysRefLocation++;
        }

        if (nextPhysRefLocation == bestFitLocation)
        {
            bestFitSet |= bestFitCandidateBit;
        }
        else
        {
            bool isBetter = false;
            if (nextPhysRefLocation > lastLocation)
            {
                // This covers the full range; favor it if the other doesn't, or if it's a closer match.
                if ((bestFitLocation <= lastLocation) || (nextPhysRefLocation < bestFitLocation))
                {
                    isBetter = true;
                }
            }
            else
            {
                // This doesn't cover the full range; favor it if the other doesn't either, but this ends later.
                if ((bestFitLocation <= lastLocation) && (nextPhysRefLocation > bestFitLocation))
                {
                    isBetter = true;
                }
            }
            if (isBetter)
            {
                bestFitSet      = bestFitCandidateBit;
                bestFitLocation = nextPhysRefLocation;
            }
        }
    }
    assert(bestFitSet != RBM_NONE);
    found = applySelection(BEST_FIT, bestFitSet);
}

// ----------------------------------------------------------
// try_IS_PREV_REG: Apply the IS_PREV_REG heuristic.
//
// Note: Oddly, the previous heuristics only considered this if it covered the range.
// TODO: Check whether this should only apply if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_IS_PREV_REG()
{
    // TODO: We do not check 'found' here, unlike the other heuristics.
    if ((prevRegRec != nullptr) && ((score & COVERS_FULL) != 0))
    {
        found = applySingleRegSelection(IS_PREV_REG, prevRegBit);
    }
}

// ----------------------------------------------------------
// try_REG_ORDER: Apply the REG_ORDER heuristic. Only applies if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_REG_ORDER()
{
    assert(!found);

    if (freeCandidates == RBM_NONE)
    {
        return;
    }

    // This will always result in a single candidate. That is, it is the tie-breaker
    // for free candidates, and doesn't make sense as anything other than the last
    // heuristic for free registers.
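    // 'regOrder' is the rank of each register in the allocation order; among the free
    // candidates, the register with the smallest rank wins.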
    unsigned  lowestRegOrder    = UINT_MAX;
    regMaskTP lowestRegOrderBit = RBM_NONE;
    for (regMaskTP regOrderCandidates = candidates; regOrderCandidates != RBM_NONE;)
    {
        regMaskTP regOrderCandidateBit = genFindLowestBit(regOrderCandidates);
        regOrderCandidates &= ~regOrderCandidateBit;
        regNumber regOrderCandidateRegNum = genRegNumFromMask(regOrderCandidateBit);

        unsigned thisRegOrder = linearScan->getRegisterRecord(regOrderCandidateRegNum)->regOrder;
        if (thisRegOrder < lowestRegOrder)
        {
            lowestRegOrder    = thisRegOrder;
            lowestRegOrderBit = regOrderCandidateBit;
        }
    }
    assert(lowestRegOrderBit != RBM_NONE);
    found = applySingleRegSelection(REG_ORDER, lowestRegOrderBit);
}

// ----------------------------------------------------------
// try_SPILL_COST: Apply the SPILL_COST heuristic.
//
void LinearScan::RegisterSelection::try_SPILL_COST()
{
    assert(!found);

    // The set of registers with the lowest spill weight.
    regMaskTP lowestCostSpillSet = RBM_NONE;
    // Apply the SPILL_COST heuristic and eliminate regs that can't be spilled.

    // The spill weight for 'refPosition' (the one we're allocating now).
    weight_t thisSpillWeight = linearScan->getWeight(refPosition);
    // The spill weight for the best candidate we've found so far.
    weight_t bestSpillWeight = FloatingPointUtils::infinite_double();
    // True if we found registers with lower spill weight than this refPosition.
    bool foundLowerSpillWeight = false;

    for (regMaskTP spillCandidates = candidates; spillCandidates != RBM_NONE;)
    {
        regMaskTP spillCandidateBit = genFindLowestBit(spillCandidates);
        spillCandidates &= ~spillCandidateBit;
        regNumber  spillCandidateRegNum    = genRegNumFromMask(spillCandidateBit);
        RegRecord* spillCandidateRegRecord = &linearScan->physRegs[spillCandidateRegNum];
        Interval*  assignedInterval        = spillCandidateRegRecord->assignedInterval;

        // Can and should the interval in this register be spilled for this one,
        // if we don't find a better alternative?
        if ((linearScan->getNextIntervalRef(spillCandidateRegNum, regType) == currentLocation) &&
            !assignedInterval->getNextRefPosition()->RegOptional())
        {
            continue;
        }
        if (!linearScan->isSpillCandidate(currentInterval, refPosition, spillCandidateRegRecord))
        {
            continue;
        }

        weight_t     currentSpillWeight = 0;
        RefPosition* recentRefPosition  = assignedInterval != nullptr ? assignedInterval->recentRefPosition : nullptr;
        if ((recentRefPosition != nullptr) &&
            (recentRefPosition->RegOptional() && !(assignedInterval->isLocalVar && recentRefPosition->IsActualRef())))
        {
            // We do not "spillAfter" if previous (recent) refPosition was regOptional or if it
            // is not an actual ref. In those cases, we will reload in future (next) refPosition.
            // For such cases, consider the spill cost of next refposition.
            // See notes in "spillInterval()".
            RefPosition* reloadRefPosition = assignedInterval->getNextRefPosition();
            if (reloadRefPosition != nullptr)
            {
                currentSpillWeight = linearScan->getWeight(reloadRefPosition);
            }
        }

        // Only consider spillCost if we were not able to calculate weight of reloadRefPosition.
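        // (currentSpillWeight remains 0 in that case, so we fall back to the cached
        // per-register spill cost.)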
        if (currentSpillWeight == 0)
        {
            currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
#ifdef TARGET_ARM
            if (currentInterval->registerType == TYP_DOUBLE)
            {
                currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
            }
#endif
        }

        if (currentSpillWeight < bestSpillWeight)
        {
            bestSpillWeight    = currentSpillWeight;
            lowestCostSpillSet = spillCandidateBit;
        }
        else if (currentSpillWeight == bestSpillWeight)
        {
            lowestCostSpillSet |= spillCandidateBit;
        }
    }

    if (lowestCostSpillSet == RBM_NONE)
    {
        return;
    }

    // We won't spill if this refPosition is RegOptional() and we have no candidates
    // with a lower spill cost.
    if ((bestSpillWeight >= thisSpillWeight) && refPosition->RegOptional())
    {
        currentInterval->assignedReg = nullptr;
        skipAllocation               = true;
        found                        = true;
    }

    // We must have at least one with the lowest spill cost.
    assert(lowestCostSpillSet != RBM_NONE);
    found = applySelection(SPILL_COST, lowestCostSpillSet);
}

// ----------------------------------------------------------
// try_FAR_NEXT_REF: Apply the FAR_NEXT_REF heuristic.
//
void LinearScan::RegisterSelection::try_FAR_NEXT_REF()
{
    assert(!found);

    LsraLocation farthestLocation = MinLocation;
    regMaskTP    farthestSet      = RBM_NONE;
    for (regMaskTP farthestCandidates = candidates; farthestCandidates != RBM_NONE;)
    {
        regMaskTP farthestCandidateBit = genFindLowestBit(farthestCandidates);
        farthestCandidates &= ~farthestCandidateBit;
        regNumber farthestCandidateRegNum = genRegNumFromMask(farthestCandidateBit);

        // Find the next RefPosition of the register.
        LsraLocation nextIntervalLocation =
            linearScan->getNextIntervalRef(farthestCandidateRegNum, currentInterval->registerType);
        LsraLocation nextPhysRefLocation =
            Min(linearScan->nextFixedRef[farthestCandidateRegNum], nextIntervalLocation);
        if (nextPhysRefLocation == farthestLocation)
        {
            farthestSet |= farthestCandidateBit;
        }
        else if (nextPhysRefLocation > farthestLocation)
        {
            farthestSet      = farthestCandidateBit;
            farthestLocation = nextPhysRefLocation;
        }
    }
    // We must have at least one with the farthest next reference.
    assert(farthestSet != RBM_NONE);
    found = applySelection(FAR_NEXT_REF, farthestSet);
}

// ----------------------------------------------------------
// try_PREV_REG_OPT: Apply the PREV_REG_OPT heuristic.
//
void LinearScan::RegisterSelection::try_PREV_REG_OPT()
{
    assert(!found);

    regMaskTP prevRegOptSet = RBM_NONE;
    for (regMaskTP prevRegOptCandidates = candidates; prevRegOptCandidates != RBM_NONE;)
    {
        regMaskTP prevRegOptCandidateBit = genFindLowestBit(prevRegOptCandidates);
        prevRegOptCandidates &= ~prevRegOptCandidateBit;
        regNumber prevRegOptCandidateRegNum = genRegNumFromMask(prevRegOptCandidateBit);
        Interval* assignedInterval          = linearScan->physRegs[prevRegOptCandidateRegNum].assignedInterval;
        bool      foundPrevRegOptReg        = true;
#ifdef DEBUG
        bool hasAssignedInterval = false;
#endif

        if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
        {
            foundPrevRegOptReg &=
                (assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional());
#ifdef DEBUG
            hasAssignedInterval = true;
#endif
        }
#ifndef TARGET_ARM
        else
        {
            foundPrevRegOptReg = false;
        }
#endif

#ifdef TARGET_ARM
        // If the current interval is TYP_DOUBLE, verify whether the other half register matches the
        // heuristics. We have three cases:
        // 1. One register of the pair has an assigned interval: check if that register's refPosition
        //    matches the heuristics. If yes, add it to the set.
        // 2. Both registers of the pair have an assigned interval: conservatively "and" the conditions
        //    for the heuristics of their corresponding refPositions. If both registers' heuristics
        //    match, add them to the set. TODO-CQ-ARM: We may implement a better condition later.
        // 3. Neither register has an assigned interval: skip adding the register, and assert.
        if (currentInterval->registerType == TYP_DOUBLE)
        {
            regNumber anotherHalfRegNum = linearScan->findAnotherHalfRegNum(prevRegOptCandidateRegNum);
            assignedInterval            = linearScan->physRegs[anotherHalfRegNum].assignedInterval;
            if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
            {
                if (assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional())
                {
                    foundPrevRegOptReg &= (assignedInterval->recentRefPosition->reload &&
                                           assignedInterval->recentRefPosition->RegOptional());
                }
#ifdef DEBUG
                hasAssignedInterval = true;
#endif
            }
        }
#endif

        if (foundPrevRegOptReg)
        {
            // TODO-Cleanup: Previously, we always used the highest regNum with a previous regOptional
            // RefPosition, which is not really consistent with the way other selection criteria are
            // applied. It should probably be: prevRegOptSet |= prevRegOptCandidateBit;
            prevRegOptSet = prevRegOptCandidateBit;
        }

#ifdef DEBUG
        // The assignedInterval should be non-null, and should have a recentRefPosition; however, since
        // this is a heuristic, we don't want a fatal error, so we just assert (not noway_assert).
        if (!hasAssignedInterval)
        {
            assert(!"Spill candidate has no assignedInterval recentRefPosition");
        }
#endif
    }
    found = applySelection(PREV_REG_OPT, prevRegOptSet);
}

// ----------------------------------------------------------
// try_REG_NUM: Apply the REG_NUM heuristic.
//
void LinearScan::RegisterSelection::try_REG_NUM()
{
    assert(!found);

    found = applySingleRegSelection(REG_NUM, genFindLowestBit(candidates));
}

// ----------------------------------------------------------
// calculateCoversSets: Calculate the necessary covers set registers to be used
// for heuristics like COVERS, COVERS_RELATED, COVERS_FULL.
//
void LinearScan::RegisterSelection::calculateCoversSets()
{
    if (freeCandidates == RBM_NONE || coversSetsCalculated)
    {
        return;
    }

    preferenceSet              = (candidates & preferences);
    regMaskTP coversCandidates = (preferenceSet == RBM_NONE) ? candidates : preferenceSet;
    for (; coversCandidates != RBM_NONE;)
    {
        regMaskTP coversCandidateBit = genFindLowestBit(coversCandidates);
        coversCandidates &= ~coversCandidateBit;
        regNumber coversCandidateRegNum = genRegNumFromMask(coversCandidateBit);

        // If we have a single candidate we don't need to compute the preference-related sets, but we
        // do need to compute the unassignedSet.
        if (!found)
        {
            // Find the next RefPosition of the register.
            LsraLocation nextIntervalLocation    = linearScan->getNextIntervalRef(coversCandidateRegNum, regType);
            LsraLocation nextPhysRefLocation     = linearScan->getNextFixedRef(coversCandidateRegNum, regType);
            LsraLocation coversCandidateLocation = Min(nextPhysRefLocation, nextIntervalLocation);

            // If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
            // we don't think it isn't covering the live range.
            // This doesn't handle the case where earlier RefPositions for this Interval are also
            // FixedRefs of this regNum, but at least those are only interesting in the case where those
            // are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
if (coversCandidateLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(coversCandidateRegNum)) { INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval)); coversCandidateLocation++; } if (coversCandidateLocation > rangeEndLocation) { coversSet |= coversCandidateBit; } if ((coversCandidateBit & relatedPreferences) != RBM_NONE) { if (coversCandidateLocation > relatedLastLocation) { coversRelatedSet |= coversCandidateBit; } } else if (coversCandidateBit == refPosition->registerAssignment) { // If we had a fixed-reg def of a reg that will be killed before the use, prefer it to any other // registers with the same score. (Note that we haven't changed the original registerAssignment // on the RefPosition). // Overload the RELATED_PREFERENCE value. // TODO-CQ: Consider if this should be split out. coversRelatedSet |= coversCandidateBit; } // Does this cover the full range of the interval? if (coversCandidateLocation > lastLocation) { coversFullSet |= coversCandidateBit; } } // The register is considered unassigned if it has no assignedInterval, OR // if its next reference is beyond the range of this interval. if (linearScan->nextIntervalRef[coversCandidateRegNum] > lastLocation) { unassignedSet |= coversCandidateBit; } } coversSetsCalculated = true; } // ---------------------------------------------------------- // select: For given `currentInterval` and `refPosition`, selects a register to be assigned. // // Arguments: // currentInterval - Current interval for which register needs to be selected. // refPosition - Refposition within the interval for which register needs to be selected. // // Return Values: // Register bit selected (a single register) and REG_NA if no register was selected. // regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { #ifdef DEBUG *registerScore = NONE; #endif reset(currentInterval, refPosition); // process data-structures if (RefTypeIsDef(refPosition->refType)) { if (currentInterval->hasConflictingDefUse) { linearScan->resolveConflictingDefAndUse(currentInterval, refPosition); candidates = refPosition->registerAssignment; } // Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the // use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark // the contained nodes as interfering). // Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which // has had its registerAssignment changed to no longer be a single register. else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) && !nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment)) { regNumber defReg = refPosition->assignedReg(); RegRecord* defRegRecord = linearScan->getRegisterRecord(defReg); RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition; assert(currFixedRegRefPosition != nullptr && currFixedRegRefPosition->nodeLocation == refPosition->nodeLocation); // If there is another fixed reference to this register before the use, change the candidates // on this RefPosition to include that of nextRefPos. 
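            // (That is, the def may then be allocated directly to a register that is acceptable
            // to the use, since the fixed register will be referenced again before the use.)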
RefPosition* nextFixedRegRefPosition = defRegRecord->getNextRefPosition(); if (nextFixedRegRefPosition != nullptr && nextFixedRegRefPosition->nodeLocation <= nextRefPos->getRefEndLocation()) { candidates |= nextRefPos->registerAssignment; if (preferences == refPosition->registerAssignment) { preferences = candidates; } } } } preferences &= candidates; if (preferences == RBM_NONE) { preferences = candidates; } #ifdef DEBUG candidates = linearScan->stressLimitRegs(refPosition, candidates); #endif assert(candidates != RBM_NONE); Interval* nextRelatedInterval = relatedInterval; Interval* finalRelatedInterval = relatedInterval; Interval* rangeEndInterval = relatedInterval; bool avoidByteRegs = false; #ifdef TARGET_X86 if ((relatedPreferences & ~RBM_BYTE_REGS) != RBM_NONE) { avoidByteRegs = true; } #endif // Follow the chain of related intervals, as long as: // - The next reference is a def. We don't want to use the relatedInterval for preferencing if its next reference // is not a new definition (as it either is or will become live). // - The next (def) reference is downstream. Otherwise we could iterate indefinitely because the preferences can be // circular. // - The intersection of preferenced registers is non-empty. // while (nextRelatedInterval != nullptr) { RefPosition* nextRelatedRefPosition = nextRelatedInterval->getNextRefPosition(); // Only use the relatedInterval for preferencing if the related interval's next reference // is a new definition. if ((nextRelatedRefPosition != nullptr) && RefTypeIsDef(nextRelatedRefPosition->refType)) { finalRelatedInterval = nextRelatedInterval; nextRelatedInterval = nullptr; // First, get the preferences for this interval regMaskTP thisRelatedPreferences = finalRelatedInterval->getCurrentPreferences(); // Now, determine if they are compatible and update the relatedPreferences that we'll consider. regMaskTP newRelatedPreferences = thisRelatedPreferences & relatedPreferences; if (newRelatedPreferences != RBM_NONE && (!avoidByteRegs || thisRelatedPreferences != RBM_BYTE_REGS)) { // TODO-CQ: The following isFree() check doesn't account for the possibility that there's an // assignedInterval whose recentRefPosition was delayFree. It also fails to account for // the TYP_DOUBLE case on ARM. It would be better to replace the call to isFree with // isRegAvailable(genRegNumFromMask(newRelatedPreferences), regType)), but this is retained // to achieve zero diffs. // bool thisIsSingleReg = isSingleRegister(newRelatedPreferences); if (!thisIsSingleReg || (finalRelatedInterval->isLocalVar && linearScan->isFree(linearScan->getRegisterRecord(genRegNumFromMask(newRelatedPreferences))))) { relatedPreferences = newRelatedPreferences; // If this Interval has a downstream def without a single-register preference, continue to iterate. if (nextRelatedRefPosition->nodeLocation > rangeEndLocation) { preferCalleeSave = (preferCalleeSave || finalRelatedInterval->preferCalleeSave); rangeEndLocation = nextRelatedRefPosition->getRangeEndLocation(); rangeEndInterval = finalRelatedInterval; nextRelatedInterval = finalRelatedInterval->relatedInterval; } } } } else { if (nextRelatedInterval == relatedInterval) { relatedInterval = nullptr; relatedPreferences = RBM_NONE; } nextRelatedInterval = nullptr; } } // For floating point, we want to be less aggressive about using callee-save registers. // So in that case, we just need to ensure that the current RefPosition is covered. 
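    // (For float/double we therefore don't attempt to cover the entire remaining lifetime;
    // rangeEndRefPosition is simply the current RefPosition.)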
if (useFloatReg(currentInterval->registerType)) { rangeEndRefPosition = refPosition; preferCalleeSave = currentInterval->preferCalleeSave; } else if (currentInterval->isWriteThru && refPosition->spillAfter) { // This is treated as a last use of the register, as there is an upcoming EH boundary. rangeEndRefPosition = refPosition; } else { rangeEndRefPosition = refPosition->getRangeEndRef(); // If we have a chain of related intervals, and a finalRelatedInterval that // is not currently occupying a register, and whose lifetime begins after this one, // we want to try to select a register that will cover its lifetime. if ((rangeEndInterval != nullptr) && (rangeEndInterval->assignedReg == nullptr) && !rangeEndInterval->isWriteThru && (rangeEndInterval->getNextRefLocation() >= rangeEndRefPosition->nodeLocation)) { lastRefPosition = rangeEndInterval->lastRefPosition; } } if ((relatedInterval != nullptr) && !relatedInterval->isWriteThru) { relatedLastLocation = relatedInterval->lastRefPosition->nodeLocation; } if (preferCalleeSave) { regMaskTP calleeSaveCandidates = calleeSaveRegs(currentInterval->registerType); if (currentInterval->isWriteThru) { // We'll only prefer a callee-save register if it's already been used. regMaskTP unusedCalleeSaves = calleeSaveCandidates & ~(linearScan->compiler->codeGen->regSet.rsGetModifiedRegsMask()); callerCalleePrefs = calleeSaveCandidates & ~unusedCalleeSaves; preferences &= ~unusedCalleeSaves; } else { callerCalleePrefs = calleeSaveCandidates; } } else { callerCalleePrefs = callerSaveRegs(currentInterval->registerType); } // If this has a delayed use (due to being used in a rmw position of a // non-commutative operator), its endLocation is delayed until the "def" // position, which is one location past the use (getRefEndLocation() takes care of this). rangeEndLocation = rangeEndRefPosition->getRefEndLocation(); lastLocation = lastRefPosition->getRefEndLocation(); // We'll set this to short-circuit remaining heuristics when we have a single candidate. found = false; // Is this a fixedReg? regMaskTP fixedRegMask = RBM_NONE; if (refPosition->isFixedRegRef) { assert(genMaxOneBit(refPosition->registerAssignment)); fixedRegMask = refPosition->registerAssignment; if (candidates == refPosition->registerAssignment) { found = true; if (linearScan->nextIntervalRef[genRegNumFromMask(candidates)] > lastLocation) { unassignedSet = candidates; } } } // Eliminate candidates that are in-use or busy. if (!found) { regMaskTP busyRegs = linearScan->regsBusyUntilKill | linearScan->regsInUseThisLocation; candidates &= ~busyRegs; // Also eliminate as busy any register with a conflicting fixed reference at this or // the next location. // Note that this will eliminate the fixedReg, if any, but we'll add it back below. regMaskTP checkConflictMask = candidates & linearScan->fixedRegs; while (checkConflictMask != RBM_NONE) { regMaskTP checkConflictBit = genFindLowestBit(checkConflictMask); checkConflictMask &= ~checkConflictBit; regNumber checkConflictReg = genRegNumFromMask(checkConflictBit); LsraLocation checkConflictLocation = linearScan->nextFixedRef[checkConflictReg]; if ((checkConflictLocation == currentLocation) || (refPosition->delayRegFree && (checkConflictLocation == (currentLocation + 1)))) { candidates &= ~checkConflictBit; } } candidates |= fixedRegMask; found = isSingleRegister(candidates); } // By chance, is prevRegRec already holding this interval, as a copyReg or having // been restored as inactive after a kill? 
// NOTE: this is not currently considered one of the selection criteria - it always wins // if it is the assignedInterval of 'prevRegRec'. if (!found && (prevRegRec != nullptr)) { prevRegBit = genRegMask(prevRegRec->regNum); if ((prevRegRec->assignedInterval == currentInterval) && ((candidates & prevRegBit) != RBM_NONE)) { candidates = prevRegBit; found = true; #ifdef DEBUG *registerScore = THIS_ASSIGNED; #endif } } else { prevRegBit = RBM_NONE; } if (!found && (candidates == RBM_NONE)) { assert(refPosition->RegOptional()); currentInterval->assignedReg = nullptr; return RBM_NONE; } // TODO-Cleanup: Previously, the "reverseSelect" stress mode reversed the order of the heuristics. // It needs to be re-engineered with this refactoring. // In non-debug builds, this will simply get optimized away bool reverseSelect = false; #ifdef DEBUG reverseSelect = linearScan->doReverseSelect(); #endif // DEBUG freeCandidates = linearScan->getFreeCandidates(candidates, regType); // If no free candidates, then double check if refPosition is an actual ref. if (freeCandidates == RBM_NONE) { // We won't spill if this refPosition is not an actual ref. if (!refPosition->IsActualRef()) { currentInterval->assignedReg = nullptr; return RBM_NONE; } } else { // Set the 'matchingConstants' set. if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType)) { matchingConstants = linearScan->getMatchingConstants(candidates, currentInterval, refPosition); } } #define IF_FOUND_GOTO_DONE \ if (found) \ goto Selection_Done; #ifdef DEBUG HeuristicFn fn; for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++) { IF_FOUND_GOTO_DONE RegisterScore heuristicToApply = RegSelectionOrder[orderId]; if (mappingTable->Lookup(heuristicToApply, &fn)) { (this->*fn)(); if (found) { *registerScore = heuristicToApply; } #if TRACK_LSRA_STATS INTRACK_STATS_IF(found, linearScan->updateLsraStat(linearScan->getLsraStatFromScore(heuristicToApply), refPosition->bbNum)); #endif // TRACK_LSRA_STATS } else { assert(!"Unexpected heuristic value!"); } } #else // RELEASE // In release, just invoke the default order #define REG_SEL_DEF(stat, value, shortname, orderSeqId) \ try_##stat(); \ IF_FOUND_GOTO_DONE #include "lsra_score.h" #undef REG_SEL_DEF #endif // DEBUG #undef IF_FOUND_GOTO_DONE Selection_Done: if (skipAllocation) { return RBM_NONE; } calculateCoversSets(); assert(found && isSingleRegister(candidates)); foundRegBit = candidates; return candidates; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Linear Scan Register Allocation a.k.a. LSRA Preconditions - All register requirements are expressed in the code stream, either as destination registers of tree nodes, or as internal registers. These requirements are expressed in the RefPositions built for each node by BuildNode(), which includes: - The register uses and definitions. - The register restrictions (candidates) of the target register, both from itself, as producer of the value (dstCandidates), and from its consuming node (srcCandidates). Note that when we talk about srcCandidates we are referring to the destination register (not any of its sources). - The number (internalCount) of registers required, and their register restrictions (internalCandidates). These are neither inputs nor outputs of the node, but used in the sequence of code generated for the tree. "Internal registers" are registers used during the code sequence generated for the node. The register lifetimes must obey the following lifetime model: - First, any internal registers are defined. - Next, any source registers are used (and are then freed if they are last use and are not identified as "delayRegFree"). - Next, the internal registers are used (and are then freed). - Next, any registers in the kill set for the instruction are killed. - Next, the destination register(s) are defined (multiple destination registers are only supported on ARM) - Finally, any "delayRegFree" source registers are freed. There are several things to note about this order: - The internal registers will never overlap any use, but they may overlap a destination register. - Internal registers are never live beyond the node. - The "delayRegFree" annotation is used for instructions that are only available in a Read-Modify-Write form. That is, the destination register is one of the sources. In this case, we must not use the same register for the non-RMW operand as for the destination. Overview (doLinearScan): - Walk all blocks, building intervals and RefPositions (buildIntervals) - Allocate registers (allocateRegisters) - Annotate nodes with register assignments (resolveRegisters) - Add move nodes as needed to resolve conflicting register assignments across non-adjacent edges. (resolveEdges, called from resolveRegisters) Postconditions: Tree nodes (GenTree): - GenTree::GetRegNum() (and gtRegPair for ARM) is annotated with the register assignment for a node. If the node does not require a register, it is annotated as such (GetRegNum() = REG_NA). For a variable definition or interior tree node (an "implicit" definition), this is the register to put the result. For an expression use, this is the place to find the value that has previously been computed. - In most cases, this register must satisfy the constraints specified for the RefPosition. - In some cases, this is difficult: - If a lclVar node currently lives in some register, it may not be desirable to move it (i.e. its current location may be desirable for future uses, e.g. if it's a callee save register, but needs to be in a specific arg register for a call). - In other cases there may be conflicts on the restrictions placed by the defining node and the node which consumes it - If such a node is constrained to a single fixed register (e.g. 
an arg register, or a return from a call), then LSRA is free to annotate the node with a different register. The code generator must issue the appropriate move. - However, if such a node is constrained to a set of registers, and its current location does not satisfy that requirement, LSRA must insert a GT_COPY node between the node and its parent. The GetRegNum() on the GT_COPY node must satisfy the register requirement of the parent. - GenTree::gtRsvdRegs has a set of registers used for internal temps. - A tree node is marked GTF_SPILL if the tree node must be spilled by the code generator after it has been evaluated. - LSRA currently does not set GTF_SPILLED on such nodes, because it caused problems in the old code generator. In the new backend perhaps this should change (see also the note below under CodeGen). - A tree node is marked GTF_SPILLED if it is a lclVar that must be reloaded prior to use. - The register (GetRegNum()) on the node indicates the register to which it must be reloaded. - For lclVar nodes, since the uses and defs are distinct tree nodes, it is always possible to annotate the node with the register to which the variable must be reloaded. - For other nodes, since they represent both the def and use, if the value must be reloaded to a different register, LSRA must insert a GT_RELOAD node in order to specify the register to which it should be reloaded. Local variable table (LclVarDsc): - LclVarDsc::lvRegister is set to true if a local variable has the same register assignment for its entire lifetime. - LclVarDsc::lvRegNum / GetOtherReg(): these are initialized to their first value at the end of LSRA (it looks like GetOtherReg() isn't? This is probably a bug (ARM)). Codegen will set them to their current value as it processes the trees, since a variable can (now) be assigned different registers over its lifetimes. XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lsra.h" #ifdef DEBUG const char* LinearScan::resolveTypeName[] = {"Split", "Join", "Critical", "SharedCritical"}; #endif // DEBUG /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Small Helper functions XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ //-------------------------------------------------------------- // lsraAssignRegToTree: Assign the given reg to tree node. // // Arguments: // tree - Gentree node // reg - register to be assigned // regIdx - register idx, if tree is a multi-reg call node. // regIdx will be zero for single-reg result producing tree nodes. 
// // Return Value: // None // void lsraAssignRegToTree(GenTree* tree, regNumber reg, unsigned regIdx) { if (regIdx == 0) { tree->SetRegNum(reg); } #if !defined(TARGET_64BIT) else if (tree->OperIsMultiRegOp()) { assert(regIdx == 1); GenTreeMultiRegOp* mul = tree->AsMultiRegOp(); mul->gtOtherReg = reg; } #endif // TARGET_64BIT #if FEATURE_MULTIREG_RET else if (tree->OperGet() == GT_COPY) { assert(regIdx == 1); GenTreeCopyOrReload* copy = tree->AsCopyOrReload(); copy->gtOtherRegs[0] = (regNumberSmall)reg; } #endif // FEATURE_MULTIREG_RET #if FEATURE_ARG_SPLIT else if (tree->OperIsPutArgSplit()) { GenTreePutArgSplit* putArg = tree->AsPutArgSplit(); putArg->SetRegNumByIdx(reg, regIdx); } #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HW_INTRINSICS else if (tree->OperIs(GT_HWINTRINSIC)) { assert(regIdx == 1); // TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers. tree->AsHWIntrinsic()->SetOtherReg(reg); } #endif // FEATURE_HW_INTRINSICS else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR)) { tree->AsLclVar()->SetRegNumByIdx(reg, regIdx); } else { assert(tree->IsMultiRegCall()); GenTreeCall* call = tree->AsCall(); call->SetRegNumByIdx(reg, regIdx); } } //------------------------------------------------------------- // getWeight: Returns the weight of the RefPosition. // // Arguments: // refPos - ref position // // Returns: // Weight of ref position. weight_t LinearScan::getWeight(RefPosition* refPos) { weight_t weight; GenTree* treeNode = refPos->treeNode; if (treeNode != nullptr) { if (isCandidateLocalRef(treeNode)) { // Tracked locals: use weighted ref cnt as the weight of the // ref position. const LclVarDsc* varDsc = compiler->lvaGetDesc(treeNode->AsLclVarCommon()); weight = varDsc->lvRefCntWtd(); if (refPos->getInterval()->isSpilled) { // Decrease the weight if the interval has already been spilled. if (varDsc->lvLiveInOutOfHndlr || refPos->getInterval()->firstRefPosition->singleDefSpill) { // An EH-var/single-def is always spilled at defs, and we'll decrease the weight by half, // since only the reload is needed. weight = weight / 2; } else { weight -= BB_UNITY_WEIGHT; } } } else { // Non-candidate local ref or non-lcl tree node. // These are considered to have two references in the basic block: // a def and a use and hence weighted ref count would be 2 times // the basic block weight in which they appear. // However, it is generally more harmful to spill tree temps, so we // double that. const unsigned TREE_TEMP_REF_COUNT = 2; const unsigned TREE_TEMP_BOOST_FACTOR = 2; weight = TREE_TEMP_REF_COUNT * TREE_TEMP_BOOST_FACTOR * blockInfo[refPos->bbNum].weight; } } else { // Non-tree node ref positions. These will have a single // reference in the basic block and hence their weighted // refcount is equal to the block weight in which they // appear. weight = blockInfo[refPos->bbNum].weight; } return weight; } // allRegs represents a set of registers that can // be used to allocate the specified type in any point // in time (more of a 'bank' of registers). 
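// For example (illustrative): allRegs(TYP_INT) yields availableIntRegs, allRegs(TYP_FLOAT)
// yields availableFloatRegs, and SIMD types currently share availableDoubleRegs (see the
// TODO-Cleanup below about adding an RBM_ALLSIMD mask).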
regMaskTP LinearScan::allRegs(RegisterType rt) { assert((rt != TYP_UNDEF) && (rt != TYP_STRUCT)); if (rt == TYP_FLOAT) { return availableFloatRegs; } else if (rt == TYP_DOUBLE) { return availableDoubleRegs; } #ifdef FEATURE_SIMD // TODO-Cleanup: Add an RBM_ALLSIMD else if (varTypeIsSIMD(rt)) { return availableDoubleRegs; } #endif // FEATURE_SIMD else { return availableIntRegs; } } regMaskTP LinearScan::allByteRegs() { #ifdef TARGET_X86 return availableIntRegs & RBM_BYTE_REGS; #else return availableIntRegs; #endif } regMaskTP LinearScan::allSIMDRegs() { return availableFloatRegs; } void LinearScan::updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition) { LsraLocation nextLocation; if (nextRefPosition == nullptr) { nextLocation = MaxLocation; fixedRegs &= ~genRegMask(regRecord->regNum); } else { nextLocation = nextRefPosition->nodeLocation; fixedRegs |= genRegMask(regRecord->regNum); } nextFixedRef[regRecord->regNum] = nextLocation; } regMaskTP LinearScan::getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition) { assert(currentInterval->isConstant && RefTypeIsDef(refPosition->refType)); regMaskTP candidates = (mask & m_RegistersWithConstants); regMaskTP result = RBM_NONE; while (candidates != RBM_NONE) { regMaskTP candidateBit = genFindLowestBit(candidates); candidates &= ~candidateBit; regNumber regNum = genRegNumFromMask(candidateBit); RegRecord* physRegRecord = getRegisterRecord(regNum); if (isMatchingConstant(physRegRecord, refPosition)) { result |= candidateBit; } } return result; } void LinearScan::clearNextIntervalRef(regNumber reg, var_types regType) { nextIntervalRef[reg] = MaxLocation; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regNumber otherReg = REG_NEXT(reg); nextIntervalRef[otherReg] = MaxLocation; } #endif } void LinearScan::clearSpillCost(regNumber reg, var_types regType) { spillCost[reg] = 0; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regNumber otherReg = REG_NEXT(reg); spillCost[otherReg] = 0; } #endif } void LinearScan::updateNextIntervalRef(regNumber reg, Interval* interval) { LsraLocation nextRefLocation = interval->getNextRefLocation(); nextIntervalRef[reg] = nextRefLocation; #ifdef TARGET_ARM if (interval->registerType == TYP_DOUBLE) { regNumber otherReg = REG_NEXT(reg); nextIntervalRef[otherReg] = nextRefLocation; } #endif } void LinearScan::updateSpillCost(regNumber reg, Interval* interval) { // An interval can have no recentRefPosition if this is the initial assignment // of a parameter to its home register. weight_t cost = (interval->recentRefPosition != nullptr) ? getWeight(interval->recentRefPosition) : 0; spillCost[reg] = cost; #ifdef TARGET_ARM if (interval->registerType == TYP_DOUBLE) { regNumber otherReg = REG_NEXT(reg); spillCost[otherReg] = cost; } #endif } //------------------------------------------------------------------------ // internalFloatRegCandidates: Return the set of registers that are appropriate // for use as internal float registers. // // Return Value: // The set of registers (as a regMaskTP). // // Notes: // compFloatingPointUsed is only required to be set if it is possible that we // will use floating point callee-save registers. // It is unlikely, if an internal register is the only use of floating point, // that it will select a callee-save register. But to be safe, we restrict // the set of candidates if compFloatingPointUsed is not already set. 
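//    For example, if compFloatingPointUsed is not yet set, the returned candidates are limited
//    to RBM_FLT_CALLEE_TRASH, so a short-lived internal float temp will not, by itself, cause
//    the prolog/epilog to save and restore a floating point callee-save register.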
regMaskTP LinearScan::internalFloatRegCandidates()
{
    if (compiler->compFloatingPointUsed)
    {
        return allRegs(TYP_FLOAT);
    }
    else
    {
        return RBM_FLT_CALLEE_TRASH;
    }
}

bool LinearScan::isFree(RegRecord* regRecord)
{
    return ((regRecord->assignedInterval == nullptr || !regRecord->assignedInterval->isActive) &&
            !isRegBusy(regRecord->regNum, regRecord->registerType));
}

RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
{
    assert((unsigned)regNum < ArrLen(physRegs));
    return &physRegs[regNum];
}

#ifdef DEBUG

//----------------------------------------------------------------------------
// getConstrainedRegMask: Returns new regMask which is the intersection of
// regMaskActual and regMaskConstraint if the new regMask has at least
// minRegCount registers, otherwise returns regMaskActual.
//
// Arguments:
//     regMaskActual      -  regMask that needs to be constrained
//     regMaskConstraint  -  regMask constraint that needs to be
//                           applied to regMaskActual
//     minRegCount        -  Minimum number of regs that should be
//                           present in the new regMask.
//
// Return Value:
//     New regMask that has minRegCount registers after intersection.
//     Otherwise returns regMaskActual.
regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstraint, unsigned minRegCount)
{
    regMaskTP newMask = regMaskActual & regMaskConstraint;
    if (genCountBits(newMask) >= minRegCount)
    {
        return newMask;
    }
    return regMaskActual;
}

//------------------------------------------------------------------------
// stressLimitRegs: Given a set of registers, expressed as a register mask, reduce
//            them based on the current stress options.
//
// Arguments:
//    mask      - The current mask of register candidates for a node
//
// Return Value:
//    A possibly-modified mask, based on the value of COMPlus_JitStressRegs.
//
// Notes:
//    This is the method used to implement the stress options that limit
//    the set of registers considered for allocation.

regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
{
    if (getStressLimitRegs() != LSRA_LIMIT_NONE)
    {
        // The refPosition could be null, for example when called
        // by getTempRegForResolution().
        int minRegCount = (refPosition != nullptr) ?
                              refPosition->minRegCandidateCount : 1;

        switch (getStressLimitRegs())
        {
            case LSRA_LIMIT_CALLEE:
                if (!compiler->opts.compDbgEnC)
                {
                    mask = getConstrainedRegMask(mask, RBM_CALLEE_SAVED, minRegCount);
                }
                break;

            case LSRA_LIMIT_CALLER:
            {
                mask = getConstrainedRegMask(mask, RBM_CALLEE_TRASH, minRegCount);
            }
            break;

            case LSRA_LIMIT_SMALL_SET:
                if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
                {
                    mask = getConstrainedRegMask(mask, LsraLimitSmallIntSet, minRegCount);
                }
                else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
                {
                    mask = getConstrainedRegMask(mask, LsraLimitSmallFPSet, minRegCount);
                }
                break;

            default:
                unreached();
        }

        if (refPosition != nullptr && refPosition->isFixedRegRef)
        {
            mask |= refPosition->registerAssignment;
        }
    }

    return mask;
}
#endif // DEBUG

//------------------------------------------------------------------------
// conflictingFixedRegReference: Determine whether the 'reg' has a
//                               fixed register use that conflicts with 'refPosition'
//
// Arguments:
//    regNum      - The register of interest
//    refPosition - The RefPosition of interest
//
// Return Value:
//    Returns true iff the given RefPosition is NOT a fixed use of this register,
//    AND either:
//    - there is a RefPosition on this RegRecord at the nodeLocation of the given RefPosition, or
//    - the given RefPosition has a delayRegFree, and there is a RefPosition on this RegRecord at
//      the nodeLocation just past the given RefPosition.
//
// Assumptions:
//    'refPosition' is non-null.

bool LinearScan::conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition)
{
    // Is this a fixed reference of this register? If so, there is no conflict.
    if (refPosition->isFixedRefOfRegMask(genRegMask(regNum)))
    {
        return false;
    }

    // Otherwise, check for conflicts.
    // There is a conflict if:
    // 1. There is a recent RefPosition on this RegRecord that is at this location, OR
    // 2. There is an upcoming RefPosition at this location, or at the next location
    //    if refPosition is a delayed use (i.e. must be kept live through the next/def location).
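    // As an illustration (hypothetical locations, not taken from an actual dump): suppose RDX has
    // its next fixed RefPosition at location 12, and 'refPosition' is a use at location 11 with
    // delayRegFree set (i.e. its value must remain live through its consumer at location 12).
    // Assigning RDX would conflict, since nextPhysRefLocation (12) == refLocation (11) + 1.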
LsraLocation refLocation = refPosition->nodeLocation; RegRecord* regRecord = getRegisterRecord(regNum); if (isRegInUse(regNum, refPosition->getInterval()->registerType) && (regRecord->assignedInterval != refPosition->getInterval())) { return true; } LsraLocation nextPhysRefLocation = nextFixedRef[regNum]; if (nextPhysRefLocation == refLocation || (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1))) { return true; } return false; } /***************************************************************************** * Inline functions for Interval *****************************************************************************/ RefPosition* Referenceable::getNextRefPosition() { if (recentRefPosition == nullptr) { return firstRefPosition; } else { return recentRefPosition->nextRefPosition; } } LsraLocation Referenceable::getNextRefLocation() { RefPosition* nextRefPosition = getNextRefPosition(); if (nextRefPosition == nullptr) { return MaxLocation; } else { return nextRefPosition->nodeLocation; } } #ifdef DEBUG void LinearScan::dumpVarToRegMap(VarToRegMap map) { bool anyPrinted = false; for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { if (map[varIndex] != REG_STK) { printf("V%02u=%s ", compiler->lvaTrackedIndexToLclNum(varIndex), getRegName(map[varIndex])); anyPrinted = true; } } if (!anyPrinted) { printf("none"); } printf("\n"); } void LinearScan::dumpInVarToRegMap(BasicBlock* block) { printf("Var=Reg beg of " FMT_BB ": ", block->bbNum); VarToRegMap map = getInVarToRegMap(block->bbNum); dumpVarToRegMap(map); } void LinearScan::dumpOutVarToRegMap(BasicBlock* block) { printf("Var=Reg end of " FMT_BB ": ", block->bbNum); VarToRegMap map = getOutVarToRegMap(block->bbNum); dumpVarToRegMap(map); } #endif // DEBUG LinearScanInterface* getLinearScanAllocator(Compiler* comp) { return new (comp, CMK_LSRA) LinearScan(comp); } //------------------------------------------------------------------------ // LSRA constructor // // Arguments: // theCompiler // // Notes: // The constructor takes care of initializing the data structures that are used // during Lowering, including (in DEBUG) getting the stress environment variables, // as they may affect the block ordering. LinearScan::LinearScan(Compiler* theCompiler) : compiler(theCompiler) , intervals(theCompiler->getAllocator(CMK_LSRA_Interval)) , allocationPassComplete(false) , refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition)) , listNodePool(theCompiler) { regSelector = new (theCompiler, CMK_LSRA) RegisterSelection(this); firstColdLoc = MaxLocation; #ifdef DEBUG maxNodeLocation = 0; activeRefPosition = nullptr; // Get the value of the environment variable that controls stress for register allocation lsraStressMask = JitConfig.JitStressRegs(); #if 0 if (lsraStressMask != 0) { // The code in this #if can be used to debug JitStressRegs issues according to // method hash or method count. // To use, simply set environment variables: // JitStressRegsHashLo and JitStressRegsHashHi to set the range of method hash, or // JitStressRegsStart and JitStressRegsEnd to set the range of method count // (Compiler::jitTotalMethodCount as reported by COMPlus_DumpJittedMethods). 
unsigned methHash = compiler->info.compMethodHash(); char* lostr = getenv("JitStressRegsHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitStressRegsHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lsraStressMask = 0; } // Check method count unsigned count = Compiler::jitTotalMethodCompiled; unsigned start = 0; unsigned end = UINT32_MAX; char* startStr = getenv("JitStressRegsStart"); char* endStr = getenv("JitStressRegsEnd"); if (startStr != nullptr) { sscanf_s(startStr, "%d", &start); dump = true; } if (endStr != nullptr) { sscanf_s(endStr, "%d", &end); dump = true; } if (count < start || (count > end)) { lsraStressMask = 0; } if ((lsraStressMask != 0) && (dump == true)) { printf("JitStressRegs = %x for method %d: %s, hash = 0x%x.\n", lsraStressMask, Compiler::jitTotalMethodCompiled, compiler->info.compFullName, compiler->info.compMethodHash()); printf(""); // flush } } #endif // 0 #endif // DEBUG // Assume that we will enregister local variables if it's not disabled. We'll reset it if we // have no tracked locals when we start allocating. Note that new tracked lclVars may be added // after the first liveness analysis - either by optimizations or by Lowering, and the tracked // set won't be recomputed until after Lowering (and this constructor is called prior to Lowering), // so we don't want to check that yet. enregisterLocalVars = compiler->compEnregLocals(); #ifdef TARGET_ARM64 availableIntRegs = (RBM_ALLINT & ~(RBM_PR | RBM_FP | RBM_LR) & ~compiler->codeGen->regSet.rsMaskResvd); #else availableIntRegs = (RBM_ALLINT & ~compiler->codeGen->regSet.rsMaskResvd); #endif #if ETW_EBP_FRAMED availableIntRegs &= ~RBM_FPBASE; #endif // ETW_EBP_FRAMED availableFloatRegs = RBM_ALLFLOAT; availableDoubleRegs = RBM_ALLDOUBLE; #ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI. // RBP is not available to the register allocator, so RSI and RDI are the only // callee-save registers available. availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI; availableFloatRegs &= ~RBM_CALLEE_SAVED; availableDoubleRegs &= ~RBM_CALLEE_SAVED; } #endif // TARGET_AMD64 compiler->rpFrameType = FT_NOT_SET; compiler->rpMustCreateEBPCalled = false; compiler->codeGen->intRegState.rsIsFloat = false; compiler->codeGen->floatRegState.rsIsFloat = true; // Block sequencing (the order in which we schedule). // Note that we don't initialize the bbVisitedSet until we do the first traversal // This is so that any blocks that are added during the first traversal // are accounted for (and we don't have BasicBlockEpoch issues). blockSequencingDone = false; blockSequence = nullptr; blockSequenceWorkList = nullptr; curBBSeqNum = 0; bbSeqCount = 0; // Information about each block, including predecessor blocks used for variable locations at block entry. blockInfo = nullptr; pendingDelayFree = false; tgtPrefUse = nullptr; } //------------------------------------------------------------------------ // getNextCandidateFromWorkList: Get the next candidate for block sequencing // // Arguments: // None. // // Return Value: // The next block to be placed in the sequence. 
// // Notes: // This method currently always returns the next block in the list, and relies on having // blocks added to the list only when they are "ready", and on the // addToBlockSequenceWorkList() method to insert them in the proper order. // However, a block may be in the list and already selected, if it was subsequently // encountered as both a flow and layout successor of the most recently selected // block. BasicBlock* LinearScan::getNextCandidateFromWorkList() { BasicBlockList* nextWorkList = nullptr; for (BasicBlockList* workList = blockSequenceWorkList; workList != nullptr; workList = nextWorkList) { nextWorkList = workList->next; BasicBlock* candBlock = workList->block; removeFromBlockSequenceWorkList(workList, nullptr); if (!isBlockVisited(candBlock)) { return candBlock; } } return nullptr; } //------------------------------------------------------------------------ // setBlockSequence: Determine the block order for register allocation. // // Arguments: // None // // Return Value: // None // // Notes: // On return, the blockSequence array contains the blocks, in the order in which they // will be allocated. // This method clears the bbVisitedSet on LinearScan, and when it returns the set // contains all the bbNums for the block. void LinearScan::setBlockSequence() { assert(!blockSequencingDone); // The method should be called only once. compiler->EnsureBasicBlockEpoch(); #ifdef DEBUG blockEpoch = compiler->GetCurBasicBlockEpoch(); #endif // DEBUG // Initialize the "visited" blocks set. bbVisitedSet = BlockSetOps::MakeEmpty(compiler); BlockSet readySet(BlockSetOps::MakeEmpty(compiler)); BlockSet predSet(BlockSetOps::MakeEmpty(compiler)); assert(blockSequence == nullptr && bbSeqCount == 0); blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount]; bbNumMaxBeforeResolution = compiler->fgBBNumMax; blockInfo = new (compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1]; assert(blockSequenceWorkList == nullptr); verifiedAllBBs = false; hasCriticalEdges = false; BasicBlock* nextBlock; // We use a bbNum of 0 for entry RefPositions. // The other information in blockInfo[0] will never be used. blockInfo[0].weight = BB_UNITY_WEIGHT; #if TRACK_LSRA_STATS for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { blockInfo[0].stats[statIndex] = 0; } #endif // TRACK_LSRA_STATS JITDUMP("Start LSRA Block Sequence: \n"); for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock) { JITDUMP("Current block: " FMT_BB "\n", block->bbNum); blockSequence[bbSeqCount] = block; markBlockVisited(block); bbSeqCount++; nextBlock = nullptr; // Initialize the blockInfo. // predBBNum will be set later. // 0 is never used as a bbNum, but is used in blockInfo to designate an exception entry block. blockInfo[block->bbNum].predBBNum = 0; // We check for critical edges below, but initialize to false. blockInfo[block->bbNum].hasCriticalInEdge = false; blockInfo[block->bbNum].hasCriticalOutEdge = false; blockInfo[block->bbNum].weight = block->getBBWeight(compiler); blockInfo[block->bbNum].hasEHBoundaryIn = block->hasEHBoundaryIn(); blockInfo[block->bbNum].hasEHBoundaryOut = block->hasEHBoundaryOut(); blockInfo[block->bbNum].hasEHPred = false; #if TRACK_LSRA_STATS for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { blockInfo[block->bbNum].stats[statIndex] = 0; } #endif // TRACK_LSRA_STATS // We treat BBCallAlwaysPairTail blocks as having EH flow, since we can't // insert resolution moves into those blocks. 
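        // (On targets that use BBJ_CALLFINALLY/BBJ_ALWAYS pairs, the "always" tail block is
        // expected to remain essentially empty apart from its jump, which is why we model it
        // here as if it had EH boundaries on both ends.)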
if (block->isBBCallAlwaysPairTail()) { blockInfo[block->bbNum].hasEHBoundaryIn = true; blockInfo[block->bbNum].hasEHBoundaryOut = true; } bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr); for (BasicBlock* const predBlock : block->PredBlocks()) { if (!hasUniquePred) { if (predBlock->NumSucc(compiler) > 1) { blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } else if (predBlock->bbJumpKind == BBJ_SWITCH) { assert(!"Switch with single successor"); } } if (!block->isBBCallAlwaysPairTail() && (predBlock->hasEHBoundaryOut() || predBlock->isBBCallAlwaysPairTail())) { assert(!block->isBBCallAlwaysPairTail()); if (hasUniquePred) { // A unique pred with an EH out edge won't allow us to keep any variables enregistered. blockInfo[block->bbNum].hasEHBoundaryIn = true; } else { blockInfo[block->bbNum].hasEHPred = true; } } } // Determine which block to schedule next. // First, update the NORMAL successors of the current block, adding them to the worklist // according to the desired order. We will handle the EH successors below. const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH) { assert(!"Switch with single successor"); } for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++) { BasicBlock* succ = block->GetSucc(succIndex, compiler); if (checkForCriticalOutEdge && succ->GetUniquePred(compiler) == nullptr) { blockInfo[block->bbNum].hasCriticalOutEdge = true; hasCriticalEdges = true; // We can stop checking now. checkForCriticalOutEdge = false; } if (isTraversalLayoutOrder() || isBlockVisited(succ)) { continue; } // We've now seen a predecessor, so add it to the work list and the "readySet". // It will be inserted in the worklist according to the specified traversal order // (i.e. pred-first or random, since layout order is handled above). if (!BlockSetOps::IsMember(compiler, readySet, succ->bbNum)) { JITDUMP("\tSucc block: " FMT_BB, succ->bbNum); addToBlockSequenceWorkList(readySet, succ, predSet); BlockSetOps::AddElemD(compiler, readySet, succ->bbNum); } } // For layout order, simply use bbNext if (isTraversalLayoutOrder()) { nextBlock = block->bbNext; continue; } while (nextBlock == nullptr) { nextBlock = getNextCandidateFromWorkList(); // TODO-Throughput: We would like to bypass this traversal if we know we've handled all // the blocks - but fgBBcount does not appear to be updated when blocks are removed. if (nextBlock == nullptr /* && bbSeqCount != compiler->fgBBcount*/ && !verifiedAllBBs) { // If we don't encounter all blocks by traversing the regular successor links, do a full // traversal of all the blocks, and add them in layout order. // This may include: // - internal-only blocks which may not be in the flow graph // - blocks that have become unreachable due to optimizations, but that are strongly // connected (these are not removed) // - EH blocks for (BasicBlock* const seqBlock : compiler->Blocks()) { if (!isBlockVisited(seqBlock)) { JITDUMP("\tUnvisited block: " FMT_BB, seqBlock->bbNum); addToBlockSequenceWorkList(readySet, seqBlock, predSet); BlockSetOps::AddElemD(compiler, readySet, seqBlock->bbNum); } } verifiedAllBBs = true; } else { break; } } } blockSequencingDone = true; #ifdef DEBUG // Make sure that we've visited all the blocks. 
    for (BasicBlock* const block : compiler->Blocks())
    {
        assert(isBlockVisited(block));
    }

    JITDUMP("Final LSRA Block Sequence: \n");
    int i = 1;
    for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
    {
        JITDUMP(FMT_BB, block->bbNum);
        JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));

        if (blockInfo[block->bbNum].hasEHBoundaryIn)
        {
            JITDUMP(" EH-in");
        }
        if (blockInfo[block->bbNum].hasEHBoundaryOut)
        {
            JITDUMP(" EH-out");
        }
        if (blockInfo[block->bbNum].hasEHPred)
        {
            JITDUMP(" has EH pred");
        }
        JITDUMP("\n");
    }
    JITDUMP("\n");
#endif
}

//------------------------------------------------------------------------
// compareBlocksForSequencing: Compare two basic blocks for sequencing order.
//
// Arguments:
//    block1            - the first block for comparison
//    block2            - the second block for comparison
//    useBlockWeights   - whether to use block weights for comparison
//
// Return Value:
//    -1 if block1 is preferred.
//     0 if the blocks are equivalent.
//     1 if block2 is preferred.
//
// Notes:
//    See addToBlockSequenceWorkList.
int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
{
    if (useBlockWeights)
    {
        weight_t weight1 = block1->getBBWeight(compiler);
        weight_t weight2 = block2->getBBWeight(compiler);

        if (weight1 > weight2)
        {
            return -1;
        }
        else if (weight1 < weight2)
        {
            return 1;
        }
    }

    // If weights are the same prefer LOWER bbnum
    if (block1->bbNum < block2->bbNum)
    {
        return -1;
    }
    else if (block1->bbNum == block2->bbNum)
    {
        return 0;
    }
    else
    {
        return 1;
    }
}

//------------------------------------------------------------------------
// addToBlockSequenceWorkList: Add a BasicBlock to the work list for sequencing.
//
// Arguments:
//    sequencedBlockSet - the set of blocks that are already sequenced
//    block             - the new block to be added
//    predSet           - a temporary block set used here to construct the set of predecessors;
//                        allocated by the caller (rather than locally) to avoid reallocating a
//                        new block set on every call to this function
//
// Return Value:
//    None.
//
// Notes:
//    The first block in the list will be the next one to be sequenced, as soon
//    as we encounter a block whose successors have all been sequenced, in pred-first
//    order, or the very next block if we are traversing in random order (once implemented).
//    This method uses a comparison method to determine the order in which to place
//    the blocks in the list.  This method queries whether all predecessors of the
//    block are sequenced at the time it is added to the list and if so uses block weights
//    for inserting the block.  A block is never inserted ahead of its predecessors.
//    A block at the time of insertion may not have all its predecessors sequenced, in
//    which case it will be sequenced based on its block number. Once a block is inserted,
//    its priority/order will not be changed later once its remaining predecessors are
//    sequenced. This means that the work list may not be sorted entirely based on
//    block weights alone.
//
//    Note also that, when random traversal order is implemented, this method
//    should insert the blocks into the list in random order, so that we can always
//    simply select the first block in the list.
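//
//    For example (hypothetical block numbers and weights): if the worklist currently holds
//    [BB03 (weight 4), BB07 (weight 1)] and we add BB05 (weight 2) whose predecessors are all
//    sequenced, the weight comparison places it between them, giving [BB03, BB05, BB07]. If BB05
//    still had unsequenced predecessors, it would instead be ordered by bbNum among the entries
//    that are not its predecessors.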
void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet) { // The block that is being added is not already sequenced assert(!BlockSetOps::IsMember(compiler, sequencedBlockSet, block->bbNum)); // Get predSet of block BlockSetOps::ClearD(compiler, predSet); for (BasicBlock* const predBlock : block->PredBlocks()) { BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum); } // If either a rarely run block or all its preds are already sequenced, use block's weight to sequence bool useBlockWeight = block->isRunRarely() || BlockSetOps::IsSubset(compiler, sequencedBlockSet, predSet); JITDUMP(", Criteria: %s", useBlockWeight ? "weight" : "bbNum"); BasicBlockList* prevNode = nullptr; BasicBlockList* nextNode = blockSequenceWorkList; while (nextNode != nullptr) { int seqResult; if (nextNode->block->isRunRarely()) { // If the block that is yet to be sequenced is a rarely run block, always use block weights for sequencing seqResult = compareBlocksForSequencing(nextNode->block, block, true); } else if (BlockSetOps::IsMember(compiler, predSet, nextNode->block->bbNum)) { // always prefer unsequenced pred blocks seqResult = -1; } else { seqResult = compareBlocksForSequencing(nextNode->block, block, useBlockWeight); } if (seqResult > 0) { break; } prevNode = nextNode; nextNode = nextNode->next; } BasicBlockList* newListNode = new (compiler, CMK_LSRA) BasicBlockList(block, nextNode); if (prevNode == nullptr) { blockSequenceWorkList = newListNode; } else { prevNode->next = newListNode; } #ifdef DEBUG nextNode = blockSequenceWorkList; JITDUMP(", Worklist: ["); while (nextNode != nullptr) { JITDUMP(FMT_BB " ", nextNode->block->bbNum); nextNode = nextNode->next; } JITDUMP("]\n"); #endif } void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode) { if (listNode == blockSequenceWorkList) { assert(prevNode == nullptr); blockSequenceWorkList = listNode->next; } else { assert(prevNode != nullptr && prevNode->next == listNode); prevNode->next = listNode->next; } // TODO-Cleanup: consider merging Compiler::BlockListNode and BasicBlockList // compiler->FreeBlockListNode(listNode); } // Initialize the block order for allocation (called each time a new traversal begins). BasicBlock* LinearScan::startBlockSequence() { if (!blockSequencingDone) { setBlockSequence(); } else { clearVisitedBlocks(); } BasicBlock* curBB = compiler->fgFirstBB; curBBSeqNum = 0; curBBNum = curBB->bbNum; assert(blockSequence[0] == compiler->fgFirstBB); markBlockVisited(curBB); return curBB; } //------------------------------------------------------------------------ // moveToNextBlock: Move to the next block in order for allocation or resolution. // // Arguments: // None // // Return Value: // The next block. // // Notes: // This method is used when the next block is actually going to be handled. // It changes curBBNum. BasicBlock* LinearScan::moveToNextBlock() { BasicBlock* nextBlock = getNextBlock(); curBBSeqNum++; if (nextBlock != nullptr) { curBBNum = nextBlock->bbNum; } return nextBlock; } //------------------------------------------------------------------------ // getNextBlock: Get the next block in order for allocation or resolution. // // Arguments: // None // // Return Value: // The next block. // // Notes: // This method does not actually change the current block - it is used simply // to determine which block will be next. 
BasicBlock* LinearScan::getNextBlock() { assert(blockSequencingDone); unsigned int nextBBSeqNum = curBBSeqNum + 1; if (nextBBSeqNum < bbSeqCount) { return blockSequence[nextBBSeqNum]; } return nullptr; } //------------------------------------------------------------------------ // doLinearScan: The main method for register allocation. // // Arguments: // None // // Return Value: // None. // void LinearScan::doLinearScan() { // Check to see whether we have any local variables to enregister. // We initialize this in the constructor based on opt settings, // but we don't want to spend time on the lclVar parts of LinearScan // if we have no tracked locals. if (enregisterLocalVars && (compiler->lvaTrackedCount == 0)) { enregisterLocalVars = false; } splitBBNumToTargetBBNumMap = nullptr; // This is complicated by the fact that physical registers have refs associated // with locations where they are killed (e.g. calls), but we don't want to // count these as being touched. compiler->codeGen->regSet.rsClearRegsModified(); initMaxSpill(); buildIntervals(); DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_REFPOS)); compiler->EndPhase(PHASE_LINEAR_SCAN_BUILD); DBEXEC(VERBOSE, lsraDumpIntervals("after buildIntervals")); initVarRegMaps(); allocateRegisters(); allocationPassComplete = true; compiler->EndPhase(PHASE_LINEAR_SCAN_ALLOC); resolveRegisters(); compiler->EndPhase(PHASE_LINEAR_SCAN_RESOLVE); assert(blockSequencingDone); // Should do at least one traversal. assert(blockEpoch == compiler->GetCurBasicBlockEpoch()); #if TRACK_LSRA_STATS if ((JitConfig.DisplayLsraStats() == 1) #ifdef DEBUG || VERBOSE #endif ) { dumpLsraStats(jitstdout); } #endif // TRACK_LSRA_STATS DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_POST)); #ifdef DEBUG compiler->fgDebugCheckLinks(); #endif compiler->compLSRADone = true; } //------------------------------------------------------------------------ // recordVarLocationsAtStartOfBB: Update live-in LclVarDscs with the appropriate // register location at the start of a block, during codegen. // // Arguments: // bb - the block for which code is about to be generated. // // Return Value: // None. // // Assumptions: // CodeGen will take care of updating the reg masks and the current var liveness, // after calling this method. // This is because we need to kill off the dead registers before setting the newly live ones. 
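//    As a hedged illustration (hypothetical variables and registers), the JITDUMP output produced
//    below looks like:
//      Recording Var Locations at start of BB03
//       V02(rsi->rcx) V05(rax)
//    where V02 is rehomed from rsi to rcx on entry to the block, and V05 is (still) live in rax.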
void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb) { if (!enregisterLocalVars) { return; } JITDUMP("Recording Var Locations at start of " FMT_BB "\n", bb->bbNum); VarToRegMap map = getInVarToRegMap(bb->bbNum); unsigned count = 0; VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, bb->bbLiveIn)); VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex); LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); regNumber oldRegNum = varDsc->GetRegNum(); regNumber newRegNum = getVarReg(map, varIndex); if (oldRegNum != newRegNum) { JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum), compiler->compRegVarName(newRegNum)); varDsc->SetRegNum(newRegNum); count++; #ifdef USING_VARIABLE_LIVE_RANGE BasicBlock* prevReportedBlock = bb->bbPrev; if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail()) { // For callf+always pair we generate the code for the always // block in genCallFinally and skip it, so we don't report // anything for it (it has only trivial instructions, so that // does not matter much). So whether we need to rehome or not // depends on what we reported at the end of the callf block. prevReportedBlock = bb->bbPrev->bbPrev; } if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex)) { // varDsc was alive on previous block end so it has an open // "VariableLiveRange" which should change to be according to // "getInVarToRegMap" compiler->codeGen->getVariableLiveKeeper()->siUpdateVariableLiveRange(varDsc, varNum); } #endif // USING_VARIABLE_LIVE_RANGE } else if (newRegNum != REG_STK) { JITDUMP(" V%02u(%s)", varNum, compiler->compRegVarName(newRegNum)); count++; } } if (count == 0) { JITDUMP(" <none>\n"); } JITDUMP("\n"); } void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* linScan) { const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varDsc->lvTracked); assert(varDsc->lvVarIndex < compiler->lvaTrackedCount); linScan->localVarIntervals[varDsc->lvVarIndex] = this; assert(linScan->getIntervalForLocalVar(varDsc->lvVarIndex) == this); this->isLocalVar = true; this->varNum = lclNum; } //------------------------------------------------------------------------ // LinearScan:identifyCandidatesExceptionDataflow: Build the set of variables exposed on EH flow edges // // Notes: // This logic was originally cloned from fgInterBlockLocalVarLiveness. // void LinearScan::identifyCandidatesExceptionDataflow() { for (BasicBlock* const block : compiler->Blocks()) { if (block->hasEHBoundaryIn()) { // live on entry to handler VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn); } if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); if (block->bbJumpKind == BBJ_EHFINALLYRET) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, // we need to mark them must-init. VarSetOps::UnionD(compiler, finallyVars, block->bbLiveOut); } } } #ifdef DEBUG if (VERBOSE) { JITDUMP("EH Vars: "); INDEBUG(dumpConvertedVarSet(compiler, exceptVars)); JITDUMP("\nFinally Vars: "); INDEBUG(dumpConvertedVarSet(compiler, finallyVars)); JITDUMP("\n\n"); } // All variables live on exit from a 'finally' block should be marked lvLiveInOutOfHndlr. // and as 'explicitly initialized' (must-init) for GC-ref types. 
VarSetOps::Iter iter(compiler, exceptVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex); LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); assert(varDsc->lvLiveInOutOfHndlr); if (varTypeIsGC(varDsc) && VarSetOps::IsMember(compiler, finallyVars, varIndex) && !varDsc->lvIsParam) { assert(varDsc->lvMustInit); } } #endif } bool LinearScan::isRegCandidate(LclVarDsc* varDsc) { if (!enregisterLocalVars) { return false; } assert(compiler->compEnregLocals()); if (!varDsc->lvTracked) { return false; } #if !defined(TARGET_64BIT) if (varDsc->lvType == TYP_LONG) { // Long variables should not be register candidates. // Lowering will have split any candidate lclVars into lo/hi vars. return false; } #endif // !defined(TARGET_64BIT) // If we have JMP, reg args must be put on the stack if (compiler->compJmpOpUsed && varDsc->lvIsRegArg) { return false; } // Don't allocate registers for dependently promoted struct fields if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { return false; } // Don't enregister if the ref count is zero. if (varDsc->lvRefCnt() == 0) { varDsc->setLvRefCntWtd(0); return false; } // Variables that are address-exposed are never enregistered, or tracked. // A struct may be promoted, and a struct that fits in a register may be fully enregistered. // Pinned variables may not be tracked (a condition of the GCInfo representation) // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning") // references when using the general GC encoding. unsigned lclNum = compiler->lvaGetLclNum(varDsc); if (varDsc->IsAddressExposed() || !varDsc->IsEnregisterableType() || (!compiler->compEnregStructLocals() && (varDsc->lvType == TYP_STRUCT))) { #ifdef DEBUG DoNotEnregisterReason dner; if (varDsc->IsAddressExposed()) { dner = DoNotEnregisterReason::AddrExposed; } else if (!varDsc->IsEnregisterableType()) { dner = DoNotEnregisterReason::NotRegSizeStruct; } else { dner = DoNotEnregisterReason::DontEnregStructs; } #endif // DEBUG compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(dner)); return false; } else if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif // JIT32_GCENCODER return false; } // Are we not optimizing and we have exception handlers? // if so mark all args and locals as volatile, so that they // won't ever get enregistered. // if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0) { compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } if (varDsc->lvDoNotEnregister) { return false; } switch (genActualType(varDsc->TypeGet())) { case TYP_FLOAT: case TYP_DOUBLE: return !compiler->opts.compDbgCode; case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: break; #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: return !varDsc->lvPromoted; #endif // FEATURE_SIMD case TYP_STRUCT: // TODO-1stClassStructs: support vars with GC pointers. The issue is that such // vars will have `lvMustInit` set, because emitter has poor support for struct liveness, // but if the variable is tracked the prolog generator would expect it to be in liveIn set, // so an assert in `genFnProlog` will fire. 
            return compiler->compEnregStructLocals() && !varDsc->HasGCPtr();

        case TYP_UNDEF:
        case TYP_UNKNOWN:
            noway_assert(!"lvType not set correctly");
            varDsc->lvType = TYP_INT;
            return false;

        default:
            return false;
    }

    return true;
}

// Identify locals & compiler temps that are register candidates
// TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order
// to avoid perturbation, but should be merged.
void LinearScan::identifyCandidates()
{
    if (enregisterLocalVars)
    {
        // Initialize the set of lclVars that are candidates for register allocation.
        VarSetOps::AssignNoCopy(compiler, registerCandidateVars, VarSetOps::MakeEmpty(compiler));

        // Initialize the sets of lclVars that are used to determine whether, and for which lclVars,
        // we need to perform resolution across basic blocks.
        // Note that we can't do this in the constructor because the number of tracked lclVars may
        // change between the constructor and the actual allocation.
        VarSetOps::AssignNoCopy(compiler, resolutionCandidateVars, VarSetOps::MakeEmpty(compiler));
        VarSetOps::AssignNoCopy(compiler, splitOrSpilledVars, VarSetOps::MakeEmpty(compiler));

        // We set enregisterLocalVars to true only if there are tracked lclVars
        assert(compiler->lvaCount != 0);
    }
    else if (compiler->lvaCount == 0)
    {
        // Nothing to do. Note that even if enregisterLocalVars is false, we still need to set the
        // lvLRACandidate field on all the lclVars to false if we have any.
        return;
    }

    VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
    VarSetOps::AssignNoCopy(compiler, finallyVars, VarSetOps::MakeEmpty(compiler));
    if (compiler->compHndBBtabCount > 0)
    {
        identifyCandidatesExceptionDataflow();
    }

    unsigned   lclNum;
    LclVarDsc* varDsc;

    // While we build intervals for the candidate lclVars, we will determine the floating point
    // lclVars, if any, to consider for callee-save register preferencing.
    // We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
    // and those that meet the second.
    // The first threshold is used for methods that are heuristically deemed either to have light
    // fp usage, or other factors that encourage conservative use of callee-save registers, such
    // as multiple exits (where there might be an early exit that would be excessively penalized by
    // lots of prolog/epilog saves & restores).
    // The second threshold is used where there are factors deemed to make it more likely that fp
    // callee save registers will be needed, such as loops or many fp vars.
    // We keep two sets of vars, since we collect some of the information to determine which set to
    // use as we iterate over the vars.
    // When we are generating AVX code on non-Unix (FEATURE_PARTIAL_SIMD_CALLEE_SAVE), we maintain an
    // additional set of LargeVectorType vars, and there is a separate threshold defined for those.
    // It is assumed that if we encounter these, that we should consider this a "high use" scenario,
    // so we don't maintain two sets of these vars.
    // This is defined as thresholdLargeVectorRefCntWtd, as we are likely to use the same mechanism
    // for vectors on Arm64, though the actual value may differ.
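    // As an illustrative reading of the thresholds defined just below: a float lclVar whose
    // weighted ref count is at least 4 * BB_UNITY_WEIGHT is added directly to
    // fpCalleeSaveCandidateVars, while one whose weighted ref count is at least
    // 2 * BB_UNITY_WEIGHT is added to fpMaybeCandidateVars, and is promoted to a callee-save
    // candidate only if the method is later deemed "high fp use" (see the
    // floatVarCount/fgHasLoops check near the end of this method).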
    unsigned int floatVarCount        = 0;
    weight_t     thresholdFPRefCntWtd = 4 * BB_UNITY_WEIGHT;
    weight_t     maybeFPRefCntWtd     = 2 * BB_UNITY_WEIGHT;
    VARSET_TP    fpMaybeCandidateVars(VarSetOps::UninitVal());
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    unsigned int largeVectorVarCount           = 0;
    weight_t     thresholdLargeVectorRefCntWtd = 4 * BB_UNITY_WEIGHT;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    if (enregisterLocalVars)
    {
        VarSetOps::AssignNoCopy(compiler, fpCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
        VarSetOps::AssignNoCopy(compiler, fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler));
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
        VarSetOps::AssignNoCopy(compiler, largeVectorVars, VarSetOps::MakeEmpty(compiler));
        VarSetOps::AssignNoCopy(compiler, largeVectorCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    }

#if DOUBLE_ALIGN
    unsigned refCntStk       = 0;
    unsigned refCntReg       = 0;
    weight_t refCntWtdReg    = 0;
    unsigned refCntStkParam  = 0; // sum of ref counts for all stack based parameters
    weight_t refCntWtdStkDbl = 0; // sum of wtd ref counts for stack based doubles
    doDoubleAlign            = false;
    bool checkDoubleAlign    = true;
    if (compiler->codeGen->isFramePointerRequired() || compiler->opts.MinOpts())
    {
        checkDoubleAlign = false;
    }
    else
    {
        switch (compiler->getCanDoubleAlign())
        {
            case MUST_DOUBLE_ALIGN:
                doDoubleAlign    = true;
                checkDoubleAlign = false;
                break;
            case CAN_DOUBLE_ALIGN:
                break;
            case CANT_DOUBLE_ALIGN:
                doDoubleAlign    = false;
                checkDoubleAlign = false;
                break;
            default:
                unreached();
        }
    }
#endif // DOUBLE_ALIGN

    // Check whether register variables are permitted.
    if (!enregisterLocalVars)
    {
        localVarIntervals = nullptr;
    }
    else if (compiler->lvaTrackedCount > 0)
    {
        // initialize mapping from tracked local to interval
        localVarIntervals = new (compiler, CMK_LSRA) Interval*[compiler->lvaTrackedCount];
    }

    INTRACK_STATS(regCandidateVarCount = 0);
    for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
    {
        // Initialize all variables to REG_STK
        varDsc->SetRegNum(REG_STK);
#ifndef TARGET_64BIT
        varDsc->SetOtherReg(REG_STK);
#endif // TARGET_64BIT

        if (!enregisterLocalVars)
        {
            varDsc->lvLRACandidate = false;
            continue;
        }

#if DOUBLE_ALIGN
        if (checkDoubleAlign)
        {
            if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
            {
                refCntStkParam += varDsc->lvRefCnt();
            }
            else if (!isRegCandidate(varDsc) || varDsc->lvDoNotEnregister)
            {
                refCntStk += varDsc->lvRefCnt();
                if ((varDsc->lvType == TYP_DOUBLE) ||
                    ((varTypeIsStruct(varDsc) && varDsc->lvStructDoubleAlign &&
                      (compiler->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT))))
                {
                    refCntWtdStkDbl += varDsc->lvRefCntWtd();
                }
            }
            else
            {
                refCntReg += varDsc->lvRefCnt();
                refCntWtdReg += varDsc->lvRefCntWtd();
            }
        }
#endif // DOUBLE_ALIGN

        // Start with the assumption that it's a candidate.

        varDsc->lvLRACandidate = 1;

        // Start with lvRegister as false - set it true only if the variable gets
        // the same register assignment throughout
        varDsc->lvRegister = false;

        if (!isRegCandidate(varDsc))
        {
            varDsc->lvLRACandidate = 0;
            if (varDsc->lvTracked)
            {
                localVarIntervals[varDsc->lvVarIndex] = nullptr;
            }
            // The current implementation of multi-reg structs that are referenced collectively
            // (i.e. by referring to the parent lclVar rather than each field separately) relies
            // on all or none of the fields being candidates.
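            // For example (hypothetical locals): if V03 is a promoted multi-reg-return struct with
            // fields V04 and V05, and V04 fails isRegCandidate(), the loop below demotes V05 (and
            // V03 itself) as well, since a mix of enregistered and stack-allocated fields is not
            // supported for such a struct.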
if (varDsc->lvIsStructField) { LclVarDsc* parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl); if (parentVarDsc->lvIsMultiRegRet && !parentVarDsc->lvDoNotEnregister) { JITDUMP("Setting multi-reg struct V%02u as not enregisterable:", varDsc->lvParentLcl); compiler->lvaSetVarDoNotEnregister(varDsc->lvParentLcl DEBUGARG(DoNotEnregisterReason::BlockOp)); for (unsigned int i = 0; i < parentVarDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(parentVarDsc->lvFieldLclStart + i); JITDUMP(" V%02u", parentVarDsc->lvFieldLclStart + i); if (fieldVarDsc->lvTracked) { fieldVarDsc->lvLRACandidate = 0; localVarIntervals[fieldVarDsc->lvVarIndex] = nullptr; VarSetOps::RemoveElemD(compiler, registerCandidateVars, fieldVarDsc->lvVarIndex); JITDUMP("*"); } // This is not accurate, but we need a non-zero refCnt for the parent so that it will // be allocated to the stack. parentVarDsc->setLvRefCnt(parentVarDsc->lvRefCnt() + fieldVarDsc->lvRefCnt()); } JITDUMP("\n"); } } continue; } if (varDsc->lvLRACandidate) { var_types type = varDsc->GetActualRegisterType(); if (varTypeUsesFloatReg(type)) { compiler->compFloatingPointUsed = true; } Interval* newInt = newInterval(type); newInt->setLocalNumber(compiler, lclNum, this); VarSetOps::AddElemD(compiler, registerCandidateVars, varDsc->lvVarIndex); // we will set this later when we have determined liveness varDsc->lvMustInit = false; if (varDsc->lvIsStructField) { newInt->isStructField = true; } if (varDsc->lvLiveInOutOfHndlr) { newInt->isWriteThru = varDsc->lvSingleDefRegCandidate; setIntervalAsSpilled(newInt); } INTRACK_STATS(regCandidateVarCount++); // We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count, // and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd // above). CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Additionally, when we are generating code for a target with partial SIMD callee-save // (AVX on non-UNIX amd64 and 16-byte vectors on arm64), we keep a separate set of the // LargeVectorType vars. if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())) { largeVectorVarCount++; VarSetOps::AddElemD(compiler, largeVectorVars, varDsc->lvVarIndex); weight_t refCntWtd = varDsc->lvRefCntWtd(); if (refCntWtd >= thresholdLargeVectorRefCntWtd) { VarSetOps::AddElemD(compiler, largeVectorCalleeSaveCandidateVars, varDsc->lvVarIndex); } } else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (regType(type) == FloatRegisterType) { floatVarCount++; weight_t refCntWtd = varDsc->lvRefCntWtd(); if (varDsc->lvIsRegArg) { // Don't count the initial reference for register params. In those cases, // using a callee-save causes an extra copy. refCntWtd -= BB_UNITY_WEIGHT; } if (refCntWtd >= thresholdFPRefCntWtd) { VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); } else if (refCntWtd >= maybeFPRefCntWtd) { VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); } } JITDUMP(" "); DBEXEC(VERBOSE, newInt->dump()); } else { localVarIntervals[varDsc->lvVarIndex] = nullptr; } } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Create Intervals to use for the save & restore of the upper halves of large vector lclVars. 
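    // (For instance, on Windows amd64 the calling convention preserves only the lower 128 bits of
    // the callee-save XMM registers across calls, so the upper half of a live large vector needs
    // its own interval to model the save and restore around each call.)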
    if (enregisterLocalVars)
    {
        VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
        unsigned        largeVectorVarIndex = 0;
        while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
        {
            makeUpperVectorInterval(largeVectorVarIndex);
        }
    }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

#if DOUBLE_ALIGN
    if (checkDoubleAlign)
    {
        // TODO-CQ: Fine-tune this:
        // In the legacy reg predictor, this runs after allocation, and then demotes any lclVars
        // allocated to the frame pointer, which is probably the wrong order.
        // However, because it runs after allocation, it can determine the impact of demoting
        // the lclVars allocated to the frame pointer.
        // => Here, estimate of the EBP refCnt and weighted refCnt is a wild guess.
        //
        unsigned refCntEBP    = refCntReg / 8;
        weight_t refCntWtdEBP = refCntWtdReg / 8;

        doDoubleAlign =
            compiler->shouldDoubleAlign(refCntStk, refCntEBP, refCntWtdEBP, refCntStkParam, refCntWtdStkDbl);
    }
#endif // DOUBLE_ALIGN

    // The factors we consider to determine which set of fp vars to use as candidates for callee save
    // registers currently include the number of fp vars, whether there are loops, and whether there are
    // multiple exits.  These have been selected somewhat empirically, but there is probably room for
    // more tuning.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
    if (VERBOSE)
    {
        printf("\nFP callee save candidate vars: ");
        if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, fpCalleeSaveCandidateVars))
        {
            dumpConvertedVarSet(compiler, fpCalleeSaveCandidateVars);
            printf("\n");
        }
        else
        {
            printf("None\n\n");
        }
    }
#endif

    JITDUMP("floatVarCount = %d; hasLoops = %s, singleExit = %s\n", floatVarCount, dspBool(compiler->fgHasLoops),
            dspBool(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr));

    // Determine whether to use the 2nd, more aggressive, threshold for fp callee saves.
    if (floatVarCount > 6 && compiler->fgHasLoops &&
        (compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr))
    {
        assert(enregisterLocalVars);
#ifdef DEBUG
        if (VERBOSE)
        {
            printf("Adding additional fp callee save candidates: \n");
            if (!VarSetOps::IsEmpty(compiler, fpMaybeCandidateVars))
            {
                dumpConvertedVarSet(compiler, fpMaybeCandidateVars);
                printf("\n");
            }
            else
            {
                printf("None\n\n");
            }
        }
#endif
        VarSetOps::UnionD(compiler, fpCalleeSaveCandidateVars, fpMaybeCandidateVars);
    }

    // From here on, we're only interested in the exceptVars that are candidates.
    if (enregisterLocalVars && (compiler->compHndBBtabCount > 0))
    {
        VarSetOps::IntersectionD(compiler, exceptVars, registerCandidateVars);
    }

#ifdef TARGET_ARM
#ifdef DEBUG
    if (VERBOSE)
    {
        // Frame layout is only pre-computed for ARM
        printf("\nlvaTable after IdentifyCandidates\n");
        compiler->lvaTableDump(Compiler::FrameLayoutState::PRE_REGALLOC_FRAME_LAYOUT);
    }
#endif // DEBUG
#endif // TARGET_ARM
}

// TODO-Throughput: This mapping can surely be more efficiently done
void LinearScan::initVarRegMaps()
{
    if (!enregisterLocalVars)
    {
        inVarToRegMaps  = nullptr;
        outVarToRegMaps = nullptr;
        return;
    }

    assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked
                                       // variables.

    // The compiler memory allocator requires that the allocation be an
    // even multiple of int-sized objects
    unsigned int varCount = compiler->lvaTrackedCount;
    regMapCount           = roundUp(varCount, (unsigned)sizeof(int));

    // Not sure why blocks aren't numbered from zero, but they don't appear to be.
    // So, if we want to index by bbNum we have to know the maximum value.
unsigned int bbCount = compiler->fgBBNumMax + 1; inVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount]; outVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount]; if (varCount > 0) { // This VarToRegMap is used during the resolution of critical edges. sharedCriticalVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; for (unsigned int i = 0; i < bbCount; i++) { VarToRegMap inVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; VarToRegMap outVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount]; for (unsigned int j = 0; j < regMapCount; j++) { inVarToRegMap[j] = REG_STK; outVarToRegMap[j] = REG_STK; } inVarToRegMaps[i] = inVarToRegMap; outVarToRegMaps[i] = outVarToRegMap; } } else { sharedCriticalVarToRegMap = nullptr; for (unsigned int i = 0; i < bbCount; i++) { inVarToRegMaps[i] = nullptr; outVarToRegMaps[i] = nullptr; } } } void LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg) { assert(enregisterLocalVars); assert(reg < UCHAR_MAX && varNum < compiler->lvaCount); inVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg; } void LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg) { assert(enregisterLocalVars); assert(reg < UCHAR_MAX && varNum < compiler->lvaCount); outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg; } LinearScan::SplitEdgeInfo LinearScan::getSplitEdgeInfo(unsigned int bbNum) { assert(enregisterLocalVars); SplitEdgeInfo splitEdgeInfo; assert(bbNum <= compiler->fgBBNumMax); assert(bbNum > bbNumMaxBeforeResolution); assert(splitBBNumToTargetBBNumMap != nullptr); splitBBNumToTargetBBNumMap->Lookup(bbNum, &splitEdgeInfo); assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution); assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution); return splitEdgeInfo; } VarToRegMap LinearScan::getInVarToRegMap(unsigned int bbNum) { assert(enregisterLocalVars); assert(bbNum <= compiler->fgBBNumMax); // For the blocks inserted to split critical edges, the inVarToRegMap is // equal to the outVarToRegMap at the "from" block. if (bbNum > bbNumMaxBeforeResolution) { SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum); unsigned fromBBNum = splitEdgeInfo.fromBBNum; if (fromBBNum == 0) { assert(splitEdgeInfo.toBBNum != 0); return inVarToRegMaps[splitEdgeInfo.toBBNum]; } else { return outVarToRegMaps[fromBBNum]; } } return inVarToRegMaps[bbNum]; } VarToRegMap LinearScan::getOutVarToRegMap(unsigned int bbNum) { assert(enregisterLocalVars); assert(bbNum <= compiler->fgBBNumMax); if (bbNum == 0) { return nullptr; } // For the blocks inserted to split critical edges, the outVarToRegMap is // equal to the inVarToRegMap at the target. if (bbNum > bbNumMaxBeforeResolution) { // If this is an empty block, its in and out maps are both the same. // We identify this case by setting fromBBNum or toBBNum to 0, and using only the other. SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum); unsigned toBBNum = splitEdgeInfo.toBBNum; if (toBBNum == 0) { assert(splitEdgeInfo.fromBBNum != 0); return outVarToRegMaps[splitEdgeInfo.fromBBNum]; } else { return inVarToRegMaps[toBBNum]; } } return outVarToRegMaps[bbNum]; } //------------------------------------------------------------------------ // setVarReg: Set the register associated with a variable in the given 'bbVarToRegMap'. 
//
// Arguments:
//    bbVarToRegMap   - the map of interest
//    trackedVarIndex - the lvVarIndex for the variable
//    reg             - the register to which it is being mapped
//
// Return Value:
//    None
//
void LinearScan::setVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex, regNumber reg)
{
    assert(trackedVarIndex < compiler->lvaTrackedCount);
    regNumberSmall regSmall = (regNumberSmall)reg;
    assert((regNumber)regSmall == reg);
    bbVarToRegMap[trackedVarIndex] = regSmall;
}

//------------------------------------------------------------------------
// getVarReg: Get the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
//    bbVarToRegMap   - the map of interest
//    trackedVarIndex - the lvVarIndex for the variable
//
// Return Value:
//    The register to which 'trackedVarIndex' is mapped
//
regNumber LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex)
{
    assert(enregisterLocalVars);
    assert(trackedVarIndex < compiler->lvaTrackedCount);
    return (regNumber)bbVarToRegMap[trackedVarIndex];
}

// Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
// the block)
VarToRegMap LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
{
    assert(enregisterLocalVars);
    VarToRegMap inVarToRegMap = inVarToRegMaps[bbNum];
    memcpy(inVarToRegMap, srcVarToRegMap, (regMapCount * sizeof(regNumber)));
    return inVarToRegMap;
}

//------------------------------------------------------------------------
// checkLastUses: Check correctness of last use flags
//
// Arguments:
//    The block for which we are checking last uses.
//
// Notes:
//    This does a backward walk of the RefPositions, starting from the liveOut set.
//    This method was previously used to set the last uses, which were computed by
//    liveness, but were not created in some cases of multiple lclVar references in the
//    same tree. However, now that last uses are computed as RefPositions are created,
//    that is no longer necessary, and this method is simply retained as a check.
//    The exception to the check-only behavior is when LSRA_EXTEND_LIFETIMES is set via
//    COMPlus_JitStressRegs. In that case, this method is required, because even though
//    the RefPositions will not be marked lastUse in that case, we still need to correctly
//    mark the last uses on the tree nodes, which is done by this method.
//
#ifdef DEBUG
void LinearScan::checkLastUses(BasicBlock* block)
{
    if (VERBOSE)
    {
        JITDUMP("\n\nCHECKING LAST USES for " FMT_BB ", liveout=", block->bbNum);
        dumpConvertedVarSet(compiler, block->bbLiveOut);
        JITDUMP("\n==============================\n");
    }

    unsigned keepAliveVarNum = BAD_VAR_NUM;
    if (compiler->lvaKeepAliveAndReportThis())
    {
        keepAliveVarNum = compiler->info.compThisArg;
        assert(compiler->info.compIsStatic == false);
    }

    // find which uses are lastUses

    // Work backwards starting with live out.
    // 'computedLive' is updated to include any exposed use (including those in this
    // block that we've already seen).  When we encounter a use, if it's
    // not in that set, then it's a last use.

    VARSET_TP computedLive(VarSetOps::MakeCopy(compiler, block->bbLiveOut));

    bool                       foundDiff       = false;
    RefPositionReverseIterator reverseIterator = refPositions.rbegin();
    RefPosition*               currentRefPosition;
    for (currentRefPosition = &reverseIterator; currentRefPosition->refType != RefTypeBB;
         reverseIterator++, currentRefPosition = &reverseIterator)
    {
        // We should never see ParamDefs or ZeroInits within a basic block.
assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit); if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isLocalVar) { unsigned varNum = currentRefPosition->getInterval()->varNum; unsigned varIndex = currentRefPosition->getInterval()->getVarIndex(compiler); LsraLocation loc = currentRefPosition->nodeLocation; // We should always have a tree node for a localVar, except for the "special" RefPositions. GenTree* tree = currentRefPosition->treeNode; assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); if (!VarSetOps::IsMember(compiler, computedLive, varIndex) && varNum != keepAliveVarNum) { // There was no exposed use, so this is a "last use" (and we mark it thus even if it's a def) if (extendLifetimes()) { // NOTE: this is a bit of a hack. When extending lifetimes, the "last use" bit will be clear. // This bit, however, would normally be used during resolveLocalRef to set the value of // LastUse on the node for a ref position. If this bit is not set correctly even when // extending lifetimes, the code generator will assert as it expects to have accurate last // use information. To avoid these asserts, set the LastUse bit here. // Note also that extendLifetimes() is an LSRA stress mode, so it will only be true for // Checked or Debug builds, for which this method will be executed. if (tree != nullptr) { tree->AsLclVar()->SetLastUse(currentRefPosition->multiRegIdx); } } else if (!currentRefPosition->lastUse) { JITDUMP("missing expected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc); foundDiff = true; } VarSetOps::AddElemD(compiler, computedLive, varIndex); } else if (currentRefPosition->lastUse) { JITDUMP("unexpected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc); foundDiff = true; } else if (extendLifetimes() && tree != nullptr) { // NOTE: see the comment above re: the extendLifetimes hack. tree->AsLclVar()->ClearLastUse(currentRefPosition->multiRegIdx); } if (currentRefPosition->refType == RefTypeDef || currentRefPosition->refType == RefTypeDummyDef) { VarSetOps::RemoveElemD(compiler, computedLive, varIndex); } } assert(reverseIterator != refPositions.rend()); } VARSET_TP liveInNotComputedLive(VarSetOps::Diff(compiler, block->bbLiveIn, computedLive)); // We may have exception vars in the liveIn set of exception blocks that are not computed live. if (compiler->ehBlockHasExnFlowDsc(block)) { VarSetOps::DiffD(compiler, liveInNotComputedLive, compiler->fgGetHandlerLiveVars(block)); } VarSetOps::Iter liveInNotComputedLiveIter(compiler, liveInNotComputedLive); unsigned liveInNotComputedLiveIndex = 0; while (liveInNotComputedLiveIter.NextElem(&liveInNotComputedLiveIndex)) { LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(liveInNotComputedLiveIndex); if (varDesc->lvLRACandidate) { JITDUMP(FMT_BB ": V%02u is in LiveIn set, but not computed live.\n", block->bbNum, compiler->lvaTrackedIndexToLclNum(liveInNotComputedLiveIndex)); foundDiff = true; } } VarSetOps::DiffD(compiler, computedLive, block->bbLiveIn); const VARSET_TP& computedLiveNotLiveIn(computedLive); // reuse the buffer. 
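// Symmetric check: any variable that the backward walk computed as live into the block, but that liveness did not record in bbLiveIn, is likewise reported as a difference below.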
VarSetOps::Iter computedLiveNotLiveInIter(compiler, computedLiveNotLiveIn); unsigned computedLiveNotLiveInIndex = 0; while (computedLiveNotLiveInIter.NextElem(&computedLiveNotLiveInIndex)) { LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(computedLiveNotLiveInIndex); if (varDesc->lvLRACandidate) { JITDUMP(FMT_BB ": V%02u is computed live, but not in LiveIn set.\n", block->bbNum, compiler->lvaTrackedIndexToLclNum(computedLiveNotLiveInIndex)); foundDiff = true; } } assert(!foundDiff); } #endif // DEBUG //------------------------------------------------------------------------ // findPredBlockForLiveIn: Determine which block should be used for the register locations of the live-in variables. // // Arguments: // block - The block for which we're selecting a predecessor. // prevBlock - The previous block in allocation order. // pPredBlockIsAllocated - A debug-only argument that indicates whether any of the predecessors have been seen // in allocation order. // // Return Value: // The selected predecessor. // // Assumptions: // in DEBUG, caller initializes *pPredBlockIsAllocated to false, and it will be set to true if the block // returned is in fact a predecessor. // // Notes: // This will select a predecessor based on the heuristics obtained by getLsraBlockBoundaryLocations(), which can be // one of: // LSRA_BLOCK_BOUNDARY_PRED - Use the register locations of a predecessor block (default) // LSRA_BLOCK_BOUNDARY_LAYOUT - Use the register locations of the previous block in layout order. // This is the only case where this actually returns a different block. // LSRA_BLOCK_BOUNDARY_ROTATE - Rotate the register locations from a predecessor. // For this case, the block returned is the same as for LSRA_BLOCK_BOUNDARY_PRED, but // the register locations will be "rotated" to stress the resolution and allocation // code. BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)) { BasicBlock* predBlock = nullptr; assert(*pPredBlockIsAllocated == false); // Blocks with exception flow on entry use no predecessor blocks, as all incoming vars // are on the stack. if (blockInfo[block->bbNum].hasEHBoundaryIn) { JITDUMP("\n\nIncoming EH boundary; "); return nullptr; } if (block == compiler->fgFirstBB) { return nullptr; } if (block->bbPreds == nullptr) { assert((block != compiler->fgFirstBB) || (prevBlock != nullptr)); JITDUMP("\n\nNo predecessor; "); // Some throw blocks do not have a predecessor. For such blocks, we want to return the fact // that the predecessor is indeed null instead of returning the prevBlock. Returning prevBlock // will be wrong, because LSRA would think that the variable is live in registers based on // the lexical flow, but that won't be true according to the control flow. // Example: // // IG05: // ... ; V01 is in 'rdi' // JNE IG07 // ... // IG06: // ... // ... ; V01 is in 'rbx' // JMP IG08 // IG07: // ... ; LSRA thinks V01 is in 'rbx' if IG06 is set as previous block of IG07. // .... // CALL CORINFO_HELP_RNGCHKFAIL // ... // IG08: // ... // ... if (block->bbJumpKind == BBJ_THROW) { JITDUMP(" - throw block; "); return nullptr; } // We may have unreachable blocks, due to optimization. // We don't want to set the predecessor as null in this case, since that will result in // unnecessary DummyDefs, and possibly result in inconsistencies requiring resolution // (since these unreachable blocks can have reachable successors).
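// Since an unreachable block never executes, the choice of predecessor here is about keeping the maps consistent for resolution rather than about correctness; prevBlock is simply the least disruptive fallback.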
return prevBlock; } #ifdef DEBUG if (getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_LAYOUT) { if (prevBlock != nullptr) { predBlock = prevBlock; } } else #endif // DEBUG { predBlock = block->GetUniquePred(compiler); if (predBlock != nullptr) { // We should already have returned null if this block has a single incoming EH boundary edge. assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { if (predBlock->bbJumpKind == BBJ_COND) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext; noway_assert(otherBlock != nullptr); if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { // This is the case when we have a conditional branch where one target has already // been visited. It would be best to use the same incoming regs as that block, // so that we have less likelihood of having to move registers. // For example, in determining the block to use for the starting register locations for // "block" in the following example, we'd like to use the same predecessor for "block" // as for "otherBlock", so that both successors of predBlock have the same locations, reducing // the likelihood of needing a split block on a backedge: // // otherPred // | // otherBlock <-+ // . . . | // | // predBlock----+ // | // block // if (blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { return nullptr; } else { for (BasicBlock* const otherPred : otherBlock->PredBlocks()) { if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum) { predBlock = otherPred; break; } } } } } } else { predBlock = nullptr; } } else { for (BasicBlock* const candidatePredBlock : block->PredBlocks()) { if (isBlockVisited(candidatePredBlock)) { if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight)) { predBlock = candidatePredBlock; INDEBUG(*pPredBlockIsAllocated = true;) } } } } if (predBlock == nullptr) { predBlock = prevBlock; assert(predBlock != nullptr); JITDUMP("\n\nNo allocated predecessor; "); } } return predBlock; } #ifdef DEBUG void LinearScan::dumpVarRefPositions(const char* title) { if (enregisterLocalVars) { printf("\nVAR REFPOSITIONS %s\n", title); for (unsigned i = 0; i < compiler->lvaCount; i++) { printf("--- V%02u", i); const LclVarDsc* varDsc = compiler->lvaGetDesc(i); if (varDsc->lvIsRegCandidate()) { Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); printf(" (Interval %d)\n", interval->intervalIndex); for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition) { ref->dump(this); } } else { printf("\n"); } } printf("\n"); } } #endif // DEBUG // Set the default rpFrameType based upon codeGen->isFramePointerRequired() // This was lifted from the register predictor // void LinearScan::setFrameType() { FrameType frameType = FT_NOT_SET; #if DOUBLE_ALIGN compiler->codeGen->setDoubleAlign(false); if (doDoubleAlign) { frameType = FT_DOUBLE_ALIGN_FRAME; compiler->codeGen->setDoubleAlign(true); } else #endif // DOUBLE_ALIGN if (compiler->codeGen->isFramePointerRequired()) { frameType = FT_EBP_FRAME; } else { if (compiler->rpMustCreateEBPCalled == false) { #ifdef DEBUG const char* reason; #endif // DEBUG compiler->rpMustCreateEBPCalled = true; if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) { JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); compiler->codeGen->setFrameRequired(true); } } if (compiler->codeGen->isFrameRequired()) { frameType = FT_EBP_FRAME; } else 
{ frameType = FT_ESP_FRAME; } } switch (frameType) { case FT_ESP_FRAME: noway_assert(!compiler->codeGen->isFramePointerRequired()); noway_assert(!compiler->codeGen->isFrameRequired()); compiler->codeGen->setFramePointerUsed(false); break; case FT_EBP_FRAME: compiler->codeGen->setFramePointerUsed(true); break; #if DOUBLE_ALIGN case FT_DOUBLE_ALIGN_FRAME: noway_assert(!compiler->codeGen->isFramePointerRequired()); compiler->codeGen->setFramePointerUsed(false); break; #endif // DOUBLE_ALIGN default: noway_assert(!"rpFrameType not set correctly!"); break; } // If we are using FPBASE as the frame register, we cannot also use it for // a local var. regMaskTP removeMask = RBM_NONE; if (frameType == FT_EBP_FRAME) { removeMask |= RBM_FPBASE; } compiler->rpFrameType = frameType; #ifdef TARGET_ARMARCH // Determine whether we need to reserve a register for large lclVar offsets. if (compiler->compRsvdRegCheck(Compiler::REGALLOC_FRAME_LAYOUT)) { // We reserve R10/IP1 in this case to hold the offsets in load/store instructions compiler->codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD; assert(REG_OPT_RSVD != REG_FP); JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD)); removeMask |= RBM_OPT_RSVD; } #endif // TARGET_ARMARCH if ((removeMask != RBM_NONE) && ((availableIntRegs & removeMask) != 0)) { // We know that we're already in "read mode" for availableIntRegs. However, // we need to remove these registers, so subsequent users (like callers // to allRegs()) get the right thing. The RemoveRegistersFromMasks() code // fixes up everything that already took a dependency on the value that was // previously read, so this completes the picture. availableIntRegs.OverrideAssign(availableIntRegs & ~removeMask); } } //------------------------------------------------------------------------ // copyOrMoveRegInUse: Is 'ref' a copyReg/moveReg that is still busy at the given location? // // Arguments: // ref: The RefPosition of interest // loc: The LsraLocation at which we're determining whether it's busy. // // Return Value: // true iff 'ref' is active at the given location // bool copyOrMoveRegInUse(RefPosition* ref, LsraLocation loc) { if (!ref->copyReg && !ref->moveReg) { return false; } if (ref->getRefEndLocation() >= loc) { return true; } Interval* interval = ref->getInterval(); RefPosition* nextRef = interval->getNextRefPosition(); if (nextRef != nullptr && nextRef->treeNode == ref->treeNode && nextRef->getRefEndLocation() >= loc) { return true; } return false; } //------------------------------------------------------------------------ // getRegisterType: Get the RegisterType to use for the given RefPosition // // Arguments: // currentInterval: The interval for the current allocation // refPosition: The RefPosition of the current Interval for which a register is being allocated // // Return Value: // The RegisterType that should be allocated for this RefPosition // // Notes: // This will nearly always be identical to the registerType of the interval, except in the case // of SIMD types of 8 bytes (currently only Vector2) when they are passed and returned in integer // registers, or copied to a return temp. // This method need only be called in situations where we may be dealing with the register requirements // of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when // we are interested in the "defining" type of the interval). 
This is because the situation of interest // only happens at the use (where it must be copied to an integer register). RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition) { assert(refPosition->getInterval() == currentInterval); RegisterType regType = currentInterval->registerType; regMaskTP candidates = refPosition->registerAssignment; assert((candidates & allRegs(regType)) != RBM_NONE); return regType; } //------------------------------------------------------------------------ // isMatchingConstant: Check to see whether a given register contains the constant referenced // by the given RefPosition // // Arguments: // physRegRecord: The RegRecord for the register we're interested in. // refPosition: The RefPosition for a constant interval. // // Return Value: // True iff the register was defined by a constant node identical to the one defining the current interval. // bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition) { if ((physRegRecord->assignedInterval == nullptr) || !physRegRecord->assignedInterval->isConstant || (refPosition->refType != RefTypeDef)) { return false; } Interval* interval = refPosition->getInterval(); if (!interval->isConstant || !isRegConstant(physRegRecord->regNum, interval->registerType)) { return false; } noway_assert(refPosition->treeNode != nullptr); GenTree* otherTreeNode = physRegRecord->assignedInterval->firstRefPosition->treeNode; noway_assert(otherTreeNode != nullptr); if (refPosition->treeNode->OperGet() != otherTreeNode->OperGet()) { return false; } switch (otherTreeNode->OperGet()) { case GT_CNS_INT: { ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue(); ssize_t v2 = otherTreeNode->AsIntCon()->IconValue(); if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0)) { #ifdef TARGET_64BIT // If the constant is negative, only reuse registers of the same type. // This is because, on a 64-bit system, we do not sign-extend immediates in registers to // 64-bits unless they are actually longs, as this requires a longer instruction. // This doesn't apply to a 32-bit system, on which long values occupy multiple registers. // (We could sign-extend, but we would have to always sign-extend, because if we reuse it more // than once, we won't have access to the instruction that originally defines the constant). if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) || (v1 >= 0)) #endif // TARGET_64BIT { return true; } } break; } case GT_CNS_DBL: { // For floating point constants, the values must be identical, not simply compare // equal. So we compare the bits. if (refPosition->treeNode->AsDblCon()->isBitwiseEqual(otherTreeNode->AsDblCon()) && (refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet())) { return true; } break; } default: break; } return false; } //------------------------------------------------------------------------ // allocateReg: Find a register that satisfies the requirements for refPosition, // taking into account the preferences for the given Interval, // and possibly spilling a lower weight Interval. // // Arguments: // currentInterval: The interval for the current allocation // refPosition: The RefPosition of the current Interval for which a register is being allocated // Return Value: // The regNumber, if any, allocated to the RefPosition. // Returns REG_NA only if 'refPosition->RegOptional()' is true, and there are // no free registers and no registers containing lower-weight Intervals that can be spilled.
// // Notes: // This method will prefer to allocate a free register, but if none are available, // it will look for a lower-weight Interval to spill. // Weight and farthest distance of next reference are used to determine whether an Interval // currently occupying a register should be spilled. It will be spilled either: // - At its most recent RefPosition, if that is within the current block, OR // - At the boundary between the previous block and this one // // To select a ref position for spilling: // - If refPosition->RegOptional() == false // The RefPosition chosen for spilling will be the lowest weight // of all, and if there is more than one ref position with the // same lowest weight, among them chooses the one with the farthest // distance to its next reference. // // - If refPosition->RegOptional() == true // The ref position chosen for spilling will not only be the lowest weight // of all but also have a weight lower than 'refPosition'. If there is // no such ref position, no register will be allocated. // regNumber LinearScan::allocateReg(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { regMaskTP foundRegBit = regSelector->select(currentInterval, refPosition DEBUG_ARG(registerScore)); if (foundRegBit == RBM_NONE) { return REG_NA; } regNumber foundReg = genRegNumFromMask(foundRegBit); RegRecord* availablePhysRegRecord = getRegisterRecord(foundReg); Interval* assignedInterval = availablePhysRegRecord->assignedInterval; if ((assignedInterval != currentInterval) && isAssigned(availablePhysRegRecord ARM_ARG(getRegisterType(currentInterval, refPosition)))) { if (regSelector->isSpilling()) { // We're spilling. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM if (currentInterval->registerType == TYP_DOUBLE) { assert(genIsValidDoubleReg(availablePhysRegRecord->regNum)); unassignDoublePhysReg(availablePhysRegRecord); } else if (assignedInterval->registerType == TYP_DOUBLE) { // Make sure we spill both halves of the double register. assert(genIsValidDoubleReg(assignedInterval->assignedReg->regNum)); unassignPhysReg(assignedInterval->assignedReg, assignedInterval->recentRefPosition); } else #endif { unassignPhysReg(availablePhysRegRecord, assignedInterval->recentRefPosition); } } else { // If we considered this "unassigned" because this interval's lifetime ends before // the next ref, remember it. // For historical reasons (due to former short-circuiting of this case), if we're reassigning // the current interval to a previous assignment, we don't remember the previous interval. // Note that we need to compute this condition before calling unassignPhysReg, which will reset // assignedInterval->physReg.
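// Illustrative reading: 'wasAssigned' is true only when the selector treated the register as unassigned even though it is still the official home of 'assignedInterval'; only then is that interval remembered below as the register's previousInterval.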
bool wasAssigned = regSelector->foundUnassignedReg() && (assignedInterval != nullptr) && (assignedInterval->physReg == foundReg); unassignPhysReg(availablePhysRegRecord ARM_ARG(currentInterval->registerType)); if (regSelector->isMatchingConstant() && compiler->opts.OptimizationEnabled()) { assert(assignedInterval->isConstant); refPosition->treeNode->SetReuseRegVal(); } else if (wasAssigned) { updatePreviousInterval(availablePhysRegRecord, assignedInterval, assignedInterval->registerType); } else { assert(!regSelector->isConstAvailable()); } } } assignPhysReg(availablePhysRegRecord, currentInterval); refPosition->registerAssignment = foundRegBit; return foundReg; } //------------------------------------------------------------------------ // canSpillReg: Determine whether we can spill physRegRecord // // Arguments: // physRegRecord - reg to spill // refLocation - Location of RefPosition where this register will be spilled // // Return Value: // True - if we can spill physRegRecord // False - otherwise // bool LinearScan::canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation) { assert(physRegRecord->assignedInterval != nullptr); RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition; if (recentAssignedRef != nullptr) { // We can't spill a register that's active at the current location. // We should already have determined this with isRegBusy before calling this method. assert(!isRefPositionActive(recentAssignedRef, refLocation)); return true; } // recentAssignedRef can only be null if this is a parameter that has not yet been // moved to a register (or stack), in which case we can't spill it yet. assert(physRegRecord->assignedInterval->getLocalVar(compiler)->lvIsParam); return false; } //------------------------------------------------------------------------ // getSpillWeight: Get the weight associated with spilling the given register // // Arguments: // physRegRecord - reg to spill // // Return Value: // The weight associated with the location at which we will spill. // // Note: This helper is designed to be used only from allocateReg() and getDoubleSpillWeight() // weight_t LinearScan::getSpillWeight(RegRecord* physRegRecord) { assert(physRegRecord->assignedInterval != nullptr); RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition; weight_t weight = BB_ZERO_WEIGHT; // We shouldn't call this method if there is no recentAssignedRef. assert(recentAssignedRef != nullptr); // We shouldn't call this method if the register is active at this location. 
assert(!isRefPositionActive(recentAssignedRef, currentLoc)); weight = getWeight(recentAssignedRef); return weight; } #ifdef TARGET_ARM //------------------------------------------------------------------------ // canSpillDoubleReg: Determine whether we can spill physRegRecord // // Arguments: // physRegRecord - reg to spill (must be a valid double register) // refLocation - Location of RefPosition where this register will be spilled // // Return Value: // True - if we can spill physRegRecord // False - otherwise // bool LinearScan::canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation) { assert(genIsValidDoubleReg(physRegRecord->regNum)); RegRecord* physRegRecord2 = getSecondHalfRegRec(physRegRecord); if ((physRegRecord->assignedInterval != nullptr) && !canSpillReg(physRegRecord, refLocation)) { return false; } if ((physRegRecord2->assignedInterval != nullptr) && !canSpillReg(physRegRecord2, refLocation)) { return false; } return true; } //------------------------------------------------------------------------ // unassignDoublePhysReg: unassign a double register (pair) // // Arguments: // doubleRegRecord - reg to unassign // // Note: // The given RegRecord must be a valid (even numbered) double register. // void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord) { assert(genIsValidDoubleReg(doubleRegRecord->regNum)); RegRecord* doubleRegRecordLo = doubleRegRecord; RegRecord* doubleRegRecordHi = getSecondHalfRegRec(doubleRegRecordLo); // For a double register, we have the following four cases. // Case 1: doubleRegRecLo is assigned to TYP_DOUBLE interval // Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals // Case 3: doubleRegRecLo is assigned to TYP_FLOAT interval and doubleRegRecHi is nullptr // Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval if (doubleRegRecordLo->assignedInterval != nullptr) { if (doubleRegRecordLo->assignedInterval->registerType == TYP_DOUBLE) { // Case 1: doubleRegRecLo is assigned to TYP_DOUBLE interval unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition); } else { // Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals // Case 3: doubleRegRecLo is assigned to TYP_FLOAT interval and doubleRegRecHi is nullptr assert(doubleRegRecordLo->assignedInterval->registerType == TYP_FLOAT); unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition); if (doubleRegRecordHi != nullptr) { if (doubleRegRecordHi->assignedInterval != nullptr) { assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT); unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition); } } } } else { // Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval assert(doubleRegRecordHi->assignedInterval != nullptr); assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT); unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition); } } #endif // TARGET_ARM //------------------------------------------------------------------------ // isRefPositionActive: Determine whether a given RefPosition is active at the given location // // Arguments: // refPosition - the RefPosition of interest // refLocation - the LsraLocation at which we want to know if it is active // // Return Value: // True - if this RefPosition occurs at the given location, OR // if it occurs at the previous 
location and is marked delayRegFree. // False - otherwise // bool LinearScan::isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation) { return (refPosition->nodeLocation == refLocation || ((refPosition->nodeLocation + 1 == refLocation) && refPosition->delayRegFree)); } //------------------------------------------------------------------------ // isSpillCandidate: Determine if a register is a spill candidate for a given RefPosition. // // Arguments: // current The interval for the current allocation // refPosition The RefPosition of the current Interval for which a register is being allocated // physRegRecord The RegRecord for the register we're considering for spill // // Return Value: // True iff the given register can be spilled to accommodate the given RefPosition. // bool LinearScan::isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord) { regMaskTP candidateBit = genRegMask(physRegRecord->regNum); LsraLocation refLocation = refPosition->nodeLocation; // We shouldn't be calling this if we haven't already determined that the register is not // busy until the next kill. assert(!isRegBusy(physRegRecord->regNum, current->registerType)); // We should already have determined that the register isn't actively in use. assert(!isRegInUse(physRegRecord->regNum, current->registerType)); // We shouldn't be calling this if 'refPosition' is a fixed reference to this register. assert(!refPosition->isFixedRefOfRegMask(candidateBit)); // We shouldn't be calling this if there is a fixed reference at the same location // (and it's not due to this reference), as checked above. assert(!conflictingFixedRegReference(physRegRecord->regNum, refPosition)); bool canSpill; #ifdef TARGET_ARM if (current->registerType == TYP_DOUBLE) { canSpill = canSpillDoubleReg(physRegRecord, refLocation); } else #endif // TARGET_ARM { canSpill = canSpillReg(physRegRecord, refLocation); } if (!canSpill) { return false; } return true; } // Grab a register to use to copy and then immediately use. // This is called only for localVar intervals that already have a register // assignment that is not compatible with the current RefPosition. // This is not like regular assignment, because we don't want to change // any preferences or existing register assignments. // Prefer a free register that's got the earliest next use. // Otherwise, spill something with the farthest next use // regNumber LinearScan::assignCopyReg(RefPosition* refPosition) { Interval* currentInterval = refPosition->getInterval(); assert(currentInterval != nullptr); assert(currentInterval->isActive); // Save the relatedInterval, if any, so that it doesn't get modified during allocation. Interval* savedRelatedInterval = currentInterval->relatedInterval; currentInterval->relatedInterval = nullptr; // We don't really want to change the default assignment, // so 1) pretend this isn't active, and 2) remember the old reg regNumber oldPhysReg = currentInterval->physReg; RegRecord* oldRegRecord = currentInterval->assignedReg; assert(oldRegRecord->regNum == oldPhysReg); currentInterval->isActive = false; // We *must* allocate a register, and it will be a copyReg. Set that field now, so that // refPosition->RegOptional() will return false. 
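// A typical scenario: the lclVar keeps its home register across the block, but this use demands a different (often fixed) register; the copyReg satisfies the use while the home assignment is restored below.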
refPosition->copyReg = true; RegisterScore registerScore = NONE; regNumber allocatedReg = allocateReg(currentInterval, refPosition DEBUG_ARG(&registerScore)); assert(allocatedReg != REG_NA); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, currentInterval, allocatedReg, nullptr, registerScore)); // Now restore the old info currentInterval->relatedInterval = savedRelatedInterval; currentInterval->physReg = oldPhysReg; currentInterval->assignedReg = oldRegRecord; currentInterval->isActive = true; return allocatedReg; } //------------------------------------------------------------------------ // isAssigned: Check whether the given RegRecord has an assignedInterval. // // Arguments: // regRec - The RegRecord to check that it is assigned. // newRegType - The register type of the interval about to be assigned (on ARM, TYP_DOUBLE also // requires checking the second half of the double register). // // Return Value: // Returns true if the given RegRecord has an assignedInterval. // bool LinearScan::isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType)) { if (regRec->assignedInterval != nullptr) { return true; } #ifdef TARGET_ARM if (newRegType == TYP_DOUBLE) { RegRecord* otherRegRecord = getSecondHalfRegRec(regRec); if (otherRegRecord->assignedInterval != nullptr) { return true; } } #endif return false; } //------------------------------------------------------------------------ // checkAndAssignInterval: Check if the interval is already assigned and // if it is, then unassign the physical record // and set the assignedInterval to 'interval' // // Arguments: // regRec - The RegRecord of interest // interval - The Interval that we're going to assign to 'regRec' // void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval) { Interval* assignedInterval = regRec->assignedInterval; if (assignedInterval != nullptr && assignedInterval != interval) { // This is allocated to another interval. Either it is inactive, or it was allocated as a // copyReg and is therefore not the "assignedReg" of the other interval. In the latter case, // we simply unassign it - in the former case we need to set the physReg on the interval to // REG_NA to indicate that it is no longer in that register. // The lack of checking for this case resulted in an assert in the retail version of System.dll, // in method SerialStream.GetDcbFlag. // Note that we can't check for the copyReg case, because we may have seen a more recent // RefPosition for the Interval that was NOT a copyReg. if (assignedInterval->assignedReg == regRec) { assert(assignedInterval->isActive == false); assignedInterval->physReg = REG_NA; } unassignPhysReg(regRec->regNum); } #ifdef TARGET_ARM // If 'interval' and 'assignedInterval' were both TYP_DOUBLE, then we have unassigned 'assignedInterval' // from both halves. Otherwise, if 'interval' is TYP_DOUBLE, we now need to unassign the other half.
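// e.g. on ARM32, assigning a TYP_DOUBLE interval to d0 must also evict a TYP_FLOAT interval occupying s1, since d0 overlaps both s0 and s1.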
if ((interval->registerType == TYP_DOUBLE) && ((assignedInterval == nullptr) || (assignedInterval->registerType == TYP_FLOAT))) { RegRecord* otherRegRecord = getSecondHalfRegRec(regRec); assignedInterval = otherRegRecord->assignedInterval; if (assignedInterval != nullptr && assignedInterval != interval) { if (assignedInterval->assignedReg == otherRegRecord) { assert(assignedInterval->isActive == false); assignedInterval->physReg = REG_NA; } unassignPhysReg(otherRegRecord->regNum); } } #endif updateAssignedInterval(regRec, interval, interval->registerType); } // Assign the given physical register interval to the given interval void LinearScan::assignPhysReg(RegRecord* regRec, Interval* interval) { regMaskTP assignedRegMask = genRegMask(regRec->regNum); compiler->codeGen->regSet.rsSetRegsModified(assignedRegMask DEBUGARG(true)); interval->assignedReg = regRec; checkAndAssignInterval(regRec, interval); interval->physReg = regRec->regNum; interval->isActive = true; if (interval->isLocalVar) { // Prefer this register for future references interval->updateRegisterPreferences(assignedRegMask); } } //------------------------------------------------------------------------ // setIntervalAsSplit: Set this Interval as being split // // Arguments: // interval - The Interval which is being split // // Return Value: // None. // // Notes: // The given Interval will be marked as split, and it will be added to the // set of splitOrSpilledVars. // // Assumptions: // "interval" must be a lclVar interval, as tree temps are never split. // This is asserted in the call to getVarIndex(). // void LinearScan::setIntervalAsSplit(Interval* interval) { if (interval->isLocalVar) { unsigned varIndex = interval->getVarIndex(compiler); if (!interval->isSplit) { VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex); } else { assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex)); } } interval->isSplit = true; } //------------------------------------------------------------------------ // setIntervalAsSpilled: Set this Interval as being spilled // // Arguments: // interval - The Interval which is being spilled // // Return Value: // None. // // Notes: // The given Interval will be marked as spilled, and it will be added // to the set of splitOrSpilledVars. // void LinearScan::setIntervalAsSpilled(Interval* interval) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (interval->isUpperVector) { assert(interval->relatedInterval->isLocalVar); interval->isSpilled = true; // Now we need to mark the local as spilled also, even if the lower half is never spilled, // as this will use the upper part of its home location. interval = interval->relatedInterval; // We'll now mark this as spilled, so it changes the spillCost. 
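// Note that the guard below only refreshes spillCost[reg] when the lclVar currently occupies a register; an inactive interval has no per-register spill cost to update.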
RefPosition* recentRefPos = interval->recentRefPosition; if (!interval->isSpilled && interval->isActive && (recentRefPos != nullptr)) { VarSetOps::AddElemD(compiler, splitOrSpilledVars, interval->getVarIndex(compiler)); interval->isSpilled = true; regNumber reg = interval->physReg; spillCost[reg] = getSpillWeight(getRegisterRecord(reg)); } } #endif if (interval->isLocalVar) { unsigned varIndex = interval->getVarIndex(compiler); if (!interval->isSpilled) { VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex); } else { assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex)); } } interval->isSpilled = true; } //------------------------------------------------------------------------ // spillInterval: Spill the "interval" starting from "fromRefPosition" (up to "toRefPosition") // // Arguments: // interval - The interval that contains the RefPosition to be spilled // fromRefPosition - The RefPosition at which the Interval is to be spilled // toRefPosition - The RefPosition at which it must be reloaded (debug only arg) // // Return Value: // None. // // Assumptions: // fromRefPosition and toRefPosition must not be null // void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* toRefPosition)) { assert(fromRefPosition != nullptr && toRefPosition != nullptr); assert(fromRefPosition->getInterval() == interval && toRefPosition->getInterval() == interval); assert(fromRefPosition->nextRefPosition == toRefPosition); if (!fromRefPosition->lastUse) { // If not allocated a register, lclVar def/use ref positions, even if reg optional, // should be marked as spillAfter. Note that if it is a WriteThru interval, the value is always // written to the stack, but the WriteThru indicates that the register is no longer live. if (fromRefPosition->RegOptional() && !(interval->isLocalVar && fromRefPosition->IsActualRef())) { fromRefPosition->registerAssignment = RBM_NONE; } else { fromRefPosition->spillAfter = true; } } // Only handle the singledef intervals whose firstRefPosition is RefTypeDef and is not yet marked as spillAfter. // The singledef intervals whose firstRefPositions are already marked as spillAfter need not be marked as // singleDefSpill because they will always get spilled at firstRefPosition. // This helps in spilling the singleDef at definition // // Note: Only mark "singleDefSpill" for those intervals that ever get spilled. The intervals that are never spilled // will not be marked as "singleDefSpill" and hence won't get spilled at the first definition. if (interval->isSingleDef && RefTypeIsDef(interval->firstRefPosition->refType) && !interval->firstRefPosition->spillAfter) { // TODO-CQ: Check if it is beneficial to spill at def, meaning, if it is a hot block don't worry about // doing the spill. Another option is to track the number of refpositions and, if an interval has more than X // refpositions, // then perform this optimization. interval->firstRefPosition->singleDefSpill = true; } assert(toRefPosition != nullptr); #ifdef DEBUG if (VERBOSE) { dumpLsraAllocationEvent(LSRA_EVENT_SPILL, interval); } #endif // DEBUG INTRACK_STATS(updateLsraStat(STAT_SPILL, fromRefPosition->bbNum)); interval->isActive = false; setIntervalAsSpilled(interval); // If fromRefPosition occurs before the beginning of this block, mark this as living in the stack // on entry to this block. 
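// e.g. a lclVar defined in a predecessor block and spilled at its first reference in this block was never live in a register within this block, so its incoming map entry must say REG_STK.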
if (fromRefPosition->nodeLocation <= curBBStartLocation) { // This must be a lclVar interval assert(interval->isLocalVar); setInVarRegForBB(curBBNum, interval->varNum, REG_STK); } } //------------------------------------------------------------------------ // unassignPhysRegNoSpill: Unassign the given physical register record from // an active interval, without spilling. // // Arguments: // regRec - the RegRecord to be unassigned // // Return Value: // None. // // Assumptions: // The assignedInterval must not be null, and must be active. // // Notes: // This method is used to unassign a register when an interval needs to be moved to a // different register, but not (yet) spilled. void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec) { Interval* assignedInterval = regRec->assignedInterval; assert(assignedInterval != nullptr && assignedInterval->isActive); assignedInterval->isActive = false; unassignPhysReg(regRec, nullptr); assignedInterval->isActive = true; } //------------------------------------------------------------------------ // checkAndClearInterval: Clear the assignedInterval for the given // physical register record // // Arguments: // regRec - the physical RegRecord to be unassigned // spillRefPosition - The RefPosition at which the assignedInterval is to be spilled // or nullptr if we aren't spilling // // Return Value: // None. // // Assumptions: // see unassignPhysReg // void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition) { Interval* assignedInterval = regRec->assignedInterval; assert(assignedInterval != nullptr); regNumber thisRegNum = regRec->regNum; if (spillRefPosition == nullptr) { // Note that we can't assert for the copyReg case // if (assignedInterval->physReg == thisRegNum) { assert(assignedInterval->isActive == false); } } else { assert(spillRefPosition->getInterval() == assignedInterval); } updateAssignedInterval(regRec, nullptr, assignedInterval->registerType); } //------------------------------------------------------------------------ // unassignPhysReg: Unassign the given physical register record, and spill the // assignedInterval at the given spillRefPosition, if any. // // Arguments: // regRec - The RegRecord to be unassigned // newRegType - The RegisterType of the interval that would be assigned // // Return Value: // None. // // Notes: // On the ARM architecture, intervals have to be unassigned taking into account // the register type of the interval that would be assigned. 
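// For example, when newRegType is TYP_DOUBLE and regRec is the even half of the pair, the // interval occupying the odd half must be unassigned as well, since the pair forms one double register.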
// void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType)) { RegRecord* regRecToUnassign = regRec; #ifdef TARGET_ARM RegRecord* anotherRegRec = nullptr; if ((regRecToUnassign->assignedInterval != nullptr) && (regRecToUnassign->assignedInterval->registerType == TYP_DOUBLE)) { // If the register type of the interval (being unassigned or new) is TYP_DOUBLE, // it has to be a valid double register (an even-numbered register) if (!genIsValidDoubleReg(regRecToUnassign->regNum)) { regRecToUnassign = findAnotherHalfRegRec(regRec); } } else { if (newRegType == TYP_DOUBLE) { anotherRegRec = getSecondHalfRegRec(regRecToUnassign); } } #endif if (regRecToUnassign->assignedInterval != nullptr) { unassignPhysReg(regRecToUnassign, regRecToUnassign->assignedInterval->recentRefPosition); } #ifdef TARGET_ARM if ((anotherRegRec != nullptr) && (anotherRegRec->assignedInterval != nullptr)) { unassignPhysReg(anotherRegRec, anotherRegRec->assignedInterval->recentRefPosition); } #endif } //------------------------------------------------------------------------ // unassignPhysReg: Unassign the given physical register record, and spill the // assignedInterval at the given spillRefPosition, if any. // // Arguments: // regRec - the RegRecord to be unassigned // spillRefPosition - The RefPosition at which the assignedInterval is to be spilled // // Return Value: // None. // // Assumptions: // The assignedInterval must not be null. // If spillRefPosition is null, the assignedInterval must be inactive, or not currently // assigned to this register (e.g. this is a copyReg for that Interval). // Otherwise, spillRefPosition must be associated with the assignedInterval. // void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition) { Interval* assignedInterval = regRec->assignedInterval; assert(assignedInterval != nullptr); assert(spillRefPosition == nullptr || spillRefPosition->getInterval() == assignedInterval); regNumber thisRegNum = regRec->regNum; // Is assignedInterval actually still assigned to this register? bool intervalIsAssigned = (assignedInterval->physReg == thisRegNum); regNumber regToUnassign = thisRegNum; #ifdef TARGET_ARM RegRecord* anotherRegRec = nullptr; // Prepare the second half RegRecord of a double register for TYP_DOUBLE if (assignedInterval->registerType == TYP_DOUBLE) { assert(isFloatRegType(regRec->registerType)); RegRecord* doubleRegRec; if (genIsValidDoubleReg(thisRegNum)) { anotherRegRec = getSecondHalfRegRec(regRec); doubleRegRec = regRec; } else { regToUnassign = REG_PREV(thisRegNum); anotherRegRec = getRegisterRecord(regToUnassign); doubleRegRec = anotherRegRec; } // Both RegRecords should have been assigned to the same interval. assert(assignedInterval == anotherRegRec->assignedInterval); if (!intervalIsAssigned && (assignedInterval->physReg == anotherRegRec->regNum)) { intervalIsAssigned = true; } clearNextIntervalRef(regToUnassign, TYP_DOUBLE); clearSpillCost(regToUnassign, TYP_DOUBLE); checkAndClearInterval(doubleRegRec, spillRefPosition); // Both RegRecords should have been unassigned together. 
assert(regRec->assignedInterval == nullptr); assert(anotherRegRec->assignedInterval == nullptr); } else #endif // TARGET_ARM { clearNextIntervalRef(thisRegNum, assignedInterval->registerType); clearSpillCost(thisRegNum, assignedInterval->registerType); checkAndClearInterval(regRec, spillRefPosition); } makeRegAvailable(regToUnassign, assignedInterval->registerType); RefPosition* nextRefPosition = nullptr; if (spillRefPosition != nullptr) { nextRefPosition = spillRefPosition->nextRefPosition; } if (!intervalIsAssigned && assignedInterval->physReg != REG_NA) { // This must have been a temporary copy reg, but we can't assert that because there // may have been intervening RefPositions that were not copyRegs. // reg->assignedInterval has already been set to nullptr by checkAndClearInterval() assert(regRec->assignedInterval == nullptr); return; } // regNumber victimAssignedReg = assignedInterval->physReg; assignedInterval->physReg = REG_NA; bool spill = assignedInterval->isActive && nextRefPosition != nullptr; if (spill) { // If this is an active interval, it must have a recentRefPosition, // otherwise it would not be active assert(spillRefPosition != nullptr); #if 0 // TODO-CQ: Enable this and insert an explicit GT_COPY (otherwise there's no way to communicate // to codegen that we want the copyReg to be the new home location). // If the last reference was a copyReg, and we're spilling the register // it was copied from, then make the copyReg the new primary location // if possible if (spillRefPosition->copyReg) { regNumber copyFromRegNum = victimAssignedReg; regNumber copyRegNum = genRegNumFromMask(spillRefPosition->registerAssignment); if (copyFromRegNum == thisRegNum && getRegisterRecord(copyRegNum)->assignedInterval == assignedInterval) { assert(copyRegNum != thisRegNum); assignedInterval->physReg = copyRegNum; assignedInterval->assignedReg = this->getRegisterRecord(copyRegNum); return; } } #endif // 0 #ifdef DEBUG // With JitStressRegs == 0x80 (LSRA_EXTEND_LIFETIMES), we may have a RefPosition // that is not marked lastUse even though the treeNode is a lastUse. In that case // we must not mark it for spill because the register will have been immediately freed // after use. While we could conceivably add special handling for this case in codegen, // it would be messy and undesirably cause the "bleeding" of LSRA stress modes outside // of LSRA. if (extendLifetimes() && assignedInterval->isLocalVar && RefTypeIsUse(spillRefPosition->refType) && spillRefPosition->treeNode != nullptr && spillRefPosition->treeNode->AsLclVar()->IsLastUse(spillRefPosition->multiRegIdx)) { dumpLsraAllocationEvent(LSRA_EVENT_SPILL_EXTENDED_LIFETIME, assignedInterval); assignedInterval->isActive = false; spill = false; // If the spillRefPosition occurs before the beginning of this block, it will have // been marked as living in this register on entry to this block, but we now need // to mark this as living on the stack. if (spillRefPosition->nodeLocation <= curBBStartLocation) { setInVarRegForBB(curBBNum, assignedInterval->varNum, REG_STK); if (spillRefPosition->nextRefPosition != nullptr) { setIntervalAsSpilled(assignedInterval); } } else { // Otherwise, we need to mark spillRefPosition as lastUse, or the interval // will remain active beyond its allocated range during the resolution phase. spillRefPosition->lastUse = true; } } else #endif // DEBUG { spillInterval(assignedInterval, spillRefPosition DEBUGARG(nextRefPosition)); } } // Maintain the association with the interval, if it has more references. 
// Or, if we "remembered" an interval assigned to this register, restore it. if (nextRefPosition != nullptr) { assignedInterval->assignedReg = regRec; } else if (canRestorePreviousInterval(regRec, assignedInterval)) { regRec->assignedInterval = regRec->previousInterval; regRec->previousInterval = nullptr; if (regRec->assignedInterval->physReg != thisRegNum) { clearNextIntervalRef(thisRegNum, regRec->assignedInterval->registerType); } else { updateNextIntervalRef(thisRegNum, regRec->assignedInterval); } #ifdef TARGET_ARM // Note: // We can not use updateAssignedInterval() and updatePreviousInterval() here, // because regRec may not be a even-numbered float register. // Update second half RegRecord of a double register for TYP_DOUBLE if (regRec->assignedInterval->registerType == TYP_DOUBLE) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec); anotherHalfRegRec->assignedInterval = regRec->assignedInterval; anotherHalfRegRec->previousInterval = nullptr; } #endif // TARGET_ARM #ifdef DEBUG if (spill) { dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval, thisRegNum); } else { dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, regRec->assignedInterval, thisRegNum); } #endif // DEBUG } else { updateAssignedInterval(regRec, nullptr, assignedInterval->registerType); updatePreviousInterval(regRec, nullptr, assignedInterval->registerType); } } //------------------------------------------------------------------------ // spillGCRefs: Spill any GC-type intervals that are currently in registers. // // Arguments: // killRefPosition - The RefPosition for the kill // // Return Value: // None. // // Notes: // This is used to ensure that we have no live GC refs in registers at an // unmanaged call. // void LinearScan::spillGCRefs(RefPosition* killRefPosition) { // For each physical register that can hold a GC type, // if it is occupied by an interval of a GC type, spill that interval. regMaskTP candidateRegs = killRefPosition->registerAssignment; INDEBUG(bool killedRegs = false); while (candidateRegs != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(candidateRegs); candidateRegs &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); Interval* assignedInterval = regRecord->assignedInterval; if (assignedInterval == nullptr || (assignedInterval->isActive == false)) { continue; } bool needsKill = varTypeIsGC(assignedInterval->registerType); if (!needsKill) { // The importer will assign a GC type to the rhs of an assignment if the lhs type is a GC type, // even if the rhs is not. See the CEE_STLOC* case in impImportBlockCode(). As a result, // we can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type. // The emitter will mark this register as holding a GC type. Therfore we must spill this value. // This was exposed on Arm32 with EH write-thru. if ((assignedInterval->recentRefPosition != nullptr) && (assignedInterval->recentRefPosition->treeNode != nullptr)) { needsKill = varTypeIsGC(assignedInterval->recentRefPosition->treeNode); } } if (needsKill) { INDEBUG(killedRegs = true); unassignPhysReg(regRecord, assignedInterval->recentRefPosition); makeRegAvailable(nextReg, assignedInterval->registerType); } } INDEBUG(dumpLsraAllocationEvent(killedRegs ? 
LSRA_EVENT_DONE_KILL_GC_REFS : LSRA_EVENT_NO_GC_KILLS, nullptr, REG_NA, nullptr)); } //------------------------------------------------------------------------ // processBlockEndAllocation: Update var locations after 'currentBlock' has been allocated // // Arguments: // currentBlock - the BasicBlock we have just finished allocating registers for // // Return Value: // None // // Notes: // Calls processBlockEndLocations() to set the outVarToRegMap, then gets the next block, // and sets the inVarToRegMap appropriately. void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock) { assert(currentBlock != nullptr); if (enregisterLocalVars) { processBlockEndLocations(currentBlock); } markBlockVisited(currentBlock); // Get the next block to allocate. // When the last block in the method has successors, there will be a final "RefTypeBB" to // ensure that we get the varToRegMap set appropriately, but in that case we don't need // to worry about "nextBlock". BasicBlock* nextBlock = getNextBlock(); if (nextBlock != nullptr) { processBlockStartLocations(nextBlock); } } //------------------------------------------------------------------------ // rotateBlockStartLocation: When in the LSRA_BLOCK_BOUNDARY_ROTATE stress mode, attempt to // "rotate" the register assignment for a localVar to the next higher // register that is available. // // Arguments: // interval - the Interval for the variable whose register is getting rotated // targetReg - its register assignment from the predecessor block being used for live-in // availableRegs - registers available for use // // Return Value: // The new register to use. #ifdef DEBUG regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs) { if (targetReg != REG_STK && getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE) { // If we're rotating the register locations at block boundaries, try to use // the next higher register number of the appropriate register type. regMaskTP candidateRegs = allRegs(interval->registerType) & availableRegs; regNumber firstReg = REG_NA; regNumber newReg = REG_NA; while (candidateRegs != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(candidateRegs); candidateRegs &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); if (nextReg > targetReg) { newReg = nextReg; break; } else if (firstReg == REG_NA) { firstReg = nextReg; } } if (newReg == REG_NA) { assert(firstReg != REG_NA); newReg = firstReg; } targetReg = newReg; } return targetReg; } #endif // DEBUG #ifdef TARGET_ARM //-------------------------------------------------------------------------------------- // isSecondHalfReg: Test if regRec is the second half of a double register // which is assigned to an interval. 
// // Arguments: // regRec - a register to be tested // interval - an interval which is assigned to some register // // Assumptions: // None // // Return Value: // True only if regRec is the second half of assignedReg in interval // bool LinearScan::isSecondHalfReg(RegRecord* regRec, Interval* interval) { RegRecord* assignedReg = interval->assignedReg; if (assignedReg != nullptr && interval->registerType == TYP_DOUBLE) { // interval should have been allocated to a valid double register assert(genIsValidDoubleReg(assignedReg->regNum)); // Find the second half RegRecord of the double register regNumber firstRegNum = assignedReg->regNum; regNumber secondRegNum = REG_NEXT(firstRegNum); assert(genIsValidFloatReg(secondRegNum) && !genIsValidDoubleReg(secondRegNum)); RegRecord* secondRegRec = getRegisterRecord(secondRegNum); return secondRegRec == regRec; } return false; } //------------------------------------------------------------------------------------------ // getSecondHalfRegRec: Get the second (odd) half of an ARM32 double register // // Arguments: // regRec - A float RegRecord // // Assumptions: // regRec must be a valid double register (i.e. even) // // Return Value: // The RegRecord for the second half of the double register // RegRecord* LinearScan::getSecondHalfRegRec(RegRecord* regRec) { regNumber secondHalfRegNum; RegRecord* secondHalfRegRec; assert(genIsValidDoubleReg(regRec->regNum)); secondHalfRegNum = REG_NEXT(regRec->regNum); secondHalfRegRec = getRegisterRecord(secondHalfRegNum); return secondHalfRegRec; } //------------------------------------------------------------------------------------------ // findAnotherHalfRegRec: Find the other half RegRecord which forms the same ARM32 double register // // Arguments: // regRec - A float RegRecord // // Assumptions: // None // // Return Value: // A RegRecord which forms the same double register as regRec // RegRecord* LinearScan::findAnotherHalfRegRec(RegRecord* regRec) { regNumber anotherHalfRegNum = findAnotherHalfRegNum(regRec->regNum); return getRegisterRecord(anotherHalfRegNum); } //------------------------------------------------------------------------------------------ // findAnotherHalfRegNum: Find the other half register's number which forms the same ARM32 double register // // Arguments: // regNumber - A float regNumber // // Assumptions: // None // // Return Value: // A register number which forms the same double register as regNum. // regNumber LinearScan::findAnotherHalfRegNum(regNumber regNum) { regNumber anotherHalfRegNum; assert(genIsValidFloatReg(regNum)); // Find the other half register for a TYP_DOUBLE interval, // following the same logic as in canRestorePreviousInterval(). 
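// e.g. an even-numbered register pairs with the next one up (s4 -> s5), while an odd-numbered register pairs with the one below (s5 -> s4).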
if (genIsValidDoubleReg(regNum)) { anotherHalfRegNum = REG_NEXT(regNum); assert(!genIsValidDoubleReg(anotherHalfRegNum)); } else { anotherHalfRegNum = REG_PREV(regNum); assert(genIsValidDoubleReg(anotherHalfRegNum)); } return anotherHalfRegNum; } #endif //-------------------------------------------------------------------------------------- // canRestorePreviousInterval: Test if we can restore previous interval // // Arguments: // regRec - a register which contains previous interval to be restored // assignedInterval - an interval just unassigned // // Assumptions: // None // // Return Value: // True only if previous interval of regRec can be restored // bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval) { bool retVal = (regRec->previousInterval != nullptr && regRec->previousInterval != assignedInterval && regRec->previousInterval->assignedReg == regRec && regRec->previousInterval->getNextRefPosition() != nullptr); #ifdef TARGET_ARM if (retVal && regRec->previousInterval->registerType == TYP_DOUBLE) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec); retVal = retVal && anotherHalfRegRec->assignedInterval == nullptr; } #endif return retVal; } bool LinearScan::isAssignedToInterval(Interval* interval, RegRecord* regRec) { bool isAssigned = (interval->assignedReg == regRec); #ifdef TARGET_ARM isAssigned |= isSecondHalfReg(regRec, interval); #endif return isAssigned; } void LinearScan::unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap) { // Is there another interval currently assigned to this register? If so unassign it. Interval* assignedInterval = regRecord->assignedInterval; if (assignedInterval != nullptr) { if (isAssignedToInterval(assignedInterval, regRecord)) { // Only localVars, constants or vector upper halves should be assigned to registers at block boundaries. if (!assignedInterval->isLocalVar) { assert(assignedInterval->isConstant || assignedInterval->IsUpperVector()); // Don't need to update the VarToRegMap. inVarToRegMap = nullptr; } regNumber assignedRegNum = assignedInterval->assignedReg->regNum; // If the interval is active, it will be set to active when we reach its new // register assignment (which we must not yet have done, or it wouldn't still be // assigned to this register). assignedInterval->isActive = false; unassignPhysReg(assignedInterval->assignedReg, nullptr); if ((inVarToRegMap != nullptr) && inVarToRegMap[assignedInterval->getVarIndex(compiler)] == assignedRegNum) { inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK; } } else { // This interval is no longer assigned to this register. updateAssignedInterval(regRecord, nullptr, assignedInterval->registerType); } } } //------------------------------------------------------------------------ // processBlockStartLocations: Update var locations on entry to 'currentBlock' and clear constant // registers. // // Arguments: // currentBlock - the BasicBlock we are about to allocate registers for // // Return Value: // None // // Notes: // During the allocation pass (allocationPassComplete = false), we use the outVarToRegMap // of the selected predecessor to determine the lclVar locations for the inVarToRegMap. // During the resolution (write-back when allocationPassComplete = true) pass, we only // modify the inVarToRegMap in cases where a lclVar was spilled after the block had been // completed. 
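// In outline: pick the chosen predecessor's outVarToRegMap, seed this block's inVarToRegMap from it (possibly rotated under stress), then reconcile each live-in interval's current physReg with that target, unassigning or reassigning registers as needed.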
void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) { // If we have no register candidates we should only call this method during allocation. assert(enregisterLocalVars || !allocationPassComplete); if (!enregisterLocalVars) { // Just clear any constant registers and return. resetAvailableRegs(); for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; clearNextIntervalRef(reg, physRegRecord->registerType); clearSpillCost(reg, physRegRecord->registerType); if (assignedInterval != nullptr) { assert(assignedInterval->isConstant); physRegRecord->assignedInterval = nullptr; } } return; } unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum; VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum); VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum); // If this block enters an exception region, all incoming vars are on the stack. if (predBBNum == 0) { #if DEBUG if (blockInfo[currentBlock->bbNum].hasEHBoundaryIn || !allocationPassComplete) { // This should still be in its initialized empty state. for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { // In the case where we're extending lifetimes for stress, we are intentionally modeling variables // as live when they really aren't to create extra register pressure & constraints. // However, this means that non-EH-vars will be live into EH regions. We can and should ignore the // locations of these. Note that they aren't reported to codegen anyway. if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex)) { assert(inVarToRegMap[varIndex] == REG_STK); } } } #endif // DEBUG predVarToRegMap = inVarToRegMap; } VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveIn)); #ifdef DEBUG if (getLsraExtendLifeTimes()) { VarSetOps::AssignNoCopy(compiler, currentLiveVars, registerCandidateVars); } // If we are rotating register assignments at block boundaries, we want to make the // inactive registers available for the rotation. regMaskTP inactiveRegs = RBM_NONE; #endif // DEBUG regMaskTP liveRegs = RBM_NONE; VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate) { continue; } regNumber targetReg; Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* nextRefPosition = interval->getNextRefPosition(); assert((nextRefPosition != nullptr) || (interval->isWriteThru)); bool leaveOnStack = false; // Special handling for variables live in/out of exception handlers. if (interval->isWriteThru) { // There are 3 cases where we will leave writethru lclVars on the stack: // 1) There is no predecessor. // 2) It is conservatively or artificially live - that is, it has no next use, // so there is no place for codegen to record that the register is no longer occupied. // 3) This block has a predecessor with an outgoing EH edge. We won't be able to add "join" // resolution to load the EH var into a register along that edge, so it must be on stack. 
if ((predBBNum == 0) || (nextRefPosition == nullptr) || (RefTypeIsDef(nextRefPosition->refType)) || blockInfo[currentBlock->bbNum].hasEHPred) { leaveOnStack = true; } } if (!allocationPassComplete) { targetReg = getVarReg(predVarToRegMap, varIndex); if (leaveOnStack) { targetReg = REG_STK; } #ifdef DEBUG regNumber newTargetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs)); if (newTargetReg != targetReg) { targetReg = newTargetReg; setIntervalAsSplit(interval); } #endif // DEBUG setVarReg(inVarToRegMap, varIndex, targetReg); } else // allocationPassComplete (i.e. resolution/write-back pass) { targetReg = getVarReg(inVarToRegMap, varIndex); // There are four cases that we need to consider during the resolution pass: // 1. This variable had a register allocated initially, and it was not spilled in the RefPosition // that feeds this block. In this case, both targetReg and predVarToRegMap[varIndex] will be targetReg. // 2. This variable had not been spilled prior to the end of predBB, but was later spilled, so // predVarToRegMap[varIndex] will be REG_STK, but targetReg is its former allocated value. // In this case, we will normally change it to REG_STK. We will update its "spilled" status when we // encounter it in resolveLocalRef(). // 2a. If the next RefPosition is marked as a copyReg, we need to retain the allocated register. This is // because the copyReg RefPosition will not have recorded the "home" register, yet downstream // RefPositions rely on the correct "home" register. // 3. This variable was spilled before we reached the end of predBB. In this case, both targetReg and // predVarToRegMap[varIndex] will be REG_STK, and the next RefPosition will have been marked // as reload during allocation time if necessary (note that by the time we actually reach the next // RefPosition, we may be using a different predecessor, at which it is still in a register). // 4. This variable was spilled during the allocation of this block, so targetReg is REG_STK // (because we set inVarToRegMap at the time we spilled it), but predVarToRegMap[varIndex] // is not REG_STK. We retain the REG_STK value in the inVarToRegMap. if (targetReg != REG_STK) { if (getVarReg(predVarToRegMap, varIndex) != REG_STK) { // Case #1 above. assert(getVarReg(predVarToRegMap, varIndex) == targetReg || getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE); } else if (!nextRefPosition->copyReg) { // case #2 above. setVarReg(inVarToRegMap, varIndex, REG_STK); targetReg = REG_STK; } // Else case 2a. - retain targetReg. } // Else case #3 or #4, we retain targetReg and nothing further to do or assert. } if (interval->physReg == targetReg) { if (interval->isActive) { assert(targetReg != REG_STK); assert(interval->assignedReg != nullptr && interval->assignedReg->regNum == targetReg && interval->assignedReg->assignedInterval == interval); liveRegs |= getRegMask(targetReg, interval->registerType); continue; } } else if (interval->physReg != REG_NA) { // This can happen if we are using the locations from a basic block other than the // immediately preceding one - where the variable was in a different location. if ((targetReg != REG_STK) || leaveOnStack) { // Unassign it from the register (it may get a new register below). 
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval) { interval->isActive = false; unassignPhysReg(getRegisterRecord(interval->physReg), nullptr); } else { // This interval was live in this register the last time we saw a reference to it, // but has since been displaced. interval->physReg = REG_NA; } } else if (!allocationPassComplete) { // Keep the register assignment - if another var has it, it will get unassigned. // Otherwise, resolution will fix it up later, and it will be more // likely to match other assignments this way. targetReg = interval->physReg; interval->isActive = true; liveRegs |= getRegMask(targetReg, interval->registerType); INDEBUG(inactiveRegs |= genRegMask(targetReg)); setVarReg(inVarToRegMap, varIndex, targetReg); } else { interval->physReg = REG_NA; } } if (targetReg != REG_STK) { RegRecord* targetRegRecord = getRegisterRecord(targetReg); liveRegs |= getRegMask(targetReg, interval->registerType); if (!allocationPassComplete) { updateNextIntervalRef(targetReg, interval); updateSpillCost(targetReg, interval); } if (!interval->isActive) { interval->isActive = true; interval->physReg = targetReg; interval->assignedReg = targetRegRecord; } if (targetRegRecord->assignedInterval != interval) { #ifdef TARGET_ARM // If this is a TYP_DOUBLE interval, and the assigned interval is either null or is TYP_FLOAT, // we also need to unassign the other half of the register. // Note that if the assigned interval is TYP_DOUBLE, it will be unassigned below. if ((interval->registerType == TYP_DOUBLE) && ((targetRegRecord->assignedInterval == nullptr) || (targetRegRecord->assignedInterval->registerType == TYP_FLOAT))) { assert(genIsValidDoubleReg(targetReg)); unassignIntervalBlockStart(getSecondHalfRegRec(targetRegRecord), allocationPassComplete ? nullptr : inVarToRegMap); } // If this is a TYP_FLOAT interval, and the assigned interval was TYP_DOUBLE, we also // need to update the liveRegs to specify that the other half is not live anymore. // As mentioned above, for TYP_DOUBLE, the other half will be unassigned further below. if ((interval->registerType == TYP_FLOAT) && ((targetRegRecord->assignedInterval != nullptr) && (targetRegRecord->assignedInterval->registerType == TYP_DOUBLE))) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(targetRegRecord); // Use TYP_FLOAT to get the regmask of just the half reg. liveRegs &= ~getRegMask(anotherHalfRegRec->regNum, TYP_FLOAT); } #endif // TARGET_ARM unassignIntervalBlockStart(targetRegRecord, allocationPassComplete ? nullptr : inVarToRegMap); assignPhysReg(targetRegRecord, interval); } if (interval->recentRefPosition != nullptr && !interval->recentRefPosition->copyReg && interval->recentRefPosition->registerAssignment != genRegMask(targetReg)) { interval->getNextRefPosition()->outOfOrder = true; } } } // Unassign any registers that are no longer live, and set register state, if allocating. 
if (!allocationPassComplete) { resetRegState(); setRegsInUse(liveRegs); } for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); if ((liveRegs & genRegMask(reg)) == 0) { makeRegAvailable(reg, physRegRecord->registerType); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { assert(assignedInterval->isLocalVar || assignedInterval->isConstant || assignedInterval->IsUpperVector()); if (!assignedInterval->isConstant && assignedInterval->assignedReg == physRegRecord) { assignedInterval->isActive = false; if (assignedInterval->getNextRefPosition() == nullptr) { unassignPhysReg(physRegRecord, nullptr); } if (!assignedInterval->IsUpperVector()) { inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK; } } else { // This interval may still be active, but was in another register in an // intervening block. updateAssignedInterval(physRegRecord, nullptr, assignedInterval->registerType); } #ifdef TARGET_ARM // unassignPhysReg, above, may have restored a 'previousInterval', in which case we need to // get the value of 'physRegRecord->assignedInterval' rather than using 'assignedInterval'. if (physRegRecord->assignedInterval != nullptr) { assignedInterval = physRegRecord->assignedInterval; } if (assignedInterval->registerType == TYP_DOUBLE) { // Skip next float register, because we already addressed a double register assert(genIsValidDoubleReg(reg)); reg = REG_NEXT(reg); makeRegAvailable(reg, physRegRecord->registerType); } #endif // TARGET_ARM } } #ifdef TARGET_ARM else { Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr && assignedInterval->registerType == TYP_DOUBLE) { // Skip next float register, because we already addressed a double register assert(genIsValidDoubleReg(reg)); reg = REG_NEXT(reg); } } #endif // TARGET_ARM } } //------------------------------------------------------------------------ // processBlockEndLocations: Record the variables occupying registers after completing the current block. // // Arguments: // currentBlock - the block we have just completed. // // Return Value: // None // // Notes: // This must be called both during the allocation and resolution (write-back) phases. // This is because we need to have the outVarToRegMap locations in order to set the locations // at successor blocks during allocation time, but if lclVars are spilled after a block has been // completed, we need to record the REG_STK location for those variables at resolution time. void LinearScan::processBlockEndLocations(BasicBlock* currentBlock) { assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum); VarToRegMap outVarToRegMap = getOutVarToRegMap(curBBNum); VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveOut)); #ifdef DEBUG if (getLsraExtendLifeTimes()) { VarSetOps::Assign(compiler, currentLiveVars, registerCandidateVars); } #endif // DEBUG VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { Interval* interval = getIntervalForLocalVar(varIndex); if (interval->isActive) { assert(interval->physReg != REG_NA && interval->physReg != REG_STK); setVarReg(outVarToRegMap, varIndex, interval->physReg); } else { outVarToRegMap[varIndex] = REG_STK; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Ensure that we have no partially-spilled large vector locals. 
assert(!Compiler::varTypeNeedsPartialCalleeSave(interval->registerType) || !interval->isPartiallySpilled); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_END_BB)); } #ifdef DEBUG void LinearScan::dumpRefPositions(const char* str) { printf("------------\n"); printf("REFPOSITIONS %s: \n", str); printf("------------\n"); for (RefPosition& refPos : refPositions) { refPos.dump(this); } } #endif // DEBUG //------------------------------------------------------------------------ // LinearScan::makeRegisterInactive: Make the interval currently assigned to // a register inactive. // // Arguments: // physRegRecord - the RegRecord for the register // // Return Value: // None. // // Notes: // It may be that the RegRecord has already been freed, e.g. due to a kill, // or it may be that the register was a copyReg, so is not the assigned register // of the Interval currently occupying the register, in which case this method has no effect. // void LinearScan::makeRegisterInactive(RegRecord* physRegRecord) { Interval* assignedInterval = physRegRecord->assignedInterval; // It may have already been freed by a "Kill" if ((assignedInterval != nullptr) && (assignedInterval->physReg == physRegRecord->regNum)) { assignedInterval->isActive = false; if (assignedInterval->isConstant) { clearNextIntervalRef(physRegRecord->regNum, assignedInterval->registerType); } } } //------------------------------------------------------------------------ // LinearScan::freeRegister: Make a register available for use // // Arguments: // physRegRecord - the RegRecord for the register to be freed. // // Return Value: // None. // // Assumptions: // None. // It may be that the RegRecord has already been freed, e.g. due to a kill, // in which case this method has no effect. // // Notes: // If there is currently an Interval assigned to this register, and it has // more references (i.e. this is a local last-use, but more uses and/or // defs remain), it will remain assigned to the physRegRecord. However, since // it is marked inactive, the register will be available, albeit less desirable // to allocate. // void LinearScan::freeRegister(RegRecord* physRegRecord) { Interval* assignedInterval = physRegRecord->assignedInterval; makeRegAvailable(physRegRecord->regNum, physRegRecord->registerType); clearSpillCost(physRegRecord->regNum, physRegRecord->registerType); makeRegisterInactive(physRegRecord); if (assignedInterval != nullptr) { // TODO: Under the following conditions we should be just putting it in regsToMakeInactive // not regsToFree. // // We don't unassign in the following conditions: // - If this is a constant node, that we may encounter again, OR // - If its recent RefPosition is not a last-use and its next RefPosition is non-null. // - If there are no more RefPositions, or the next // one is a def. Note that the latter condition doesn't actually ensure that // there aren't subsequent uses that could be reached by a value in the assigned // register, but is merely a heuristic to avoid tying up the register (or using // it when it's non-optimal). A better alternative would be to use SSA, so that // we wouldn't unnecessarily link separate live ranges to the same register. 
// RefPosition* nextRefPosition = assignedInterval->getNextRefPosition(); if (!assignedInterval->isConstant && (nextRefPosition == nullptr || RefTypeIsDef(nextRefPosition->refType))) { #ifdef TARGET_ARM assert((assignedInterval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(physRegRecord->regNum)); #endif // TARGET_ARM unassignPhysReg(physRegRecord, nullptr); } } } //------------------------------------------------------------------------ // LinearScan::freeRegisters: Free the registers in 'regsToFree' // // Arguments: // regsToFree - the mask of registers to free // void LinearScan::freeRegisters(regMaskTP regsToFree) { if (regsToFree == RBM_NONE) { return; } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS)); makeRegsAvailable(regsToFree); while (regsToFree != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(regsToFree); regsToFree &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); #ifdef TARGET_ARM if (regRecord->assignedInterval != nullptr && (regRecord->assignedInterval->registerType == TYP_DOUBLE)) { assert(genIsValidDoubleReg(nextReg)); regsToFree &= ~(nextRegBit << 1); } #endif freeRegister(regRecord); } } //------------------------------------------------------------------------ // LinearScan::allocateRegisters: Perform the actual register allocation by iterating over // all of the previously constructed Intervals // void LinearScan::allocateRegisters() { JITDUMP("*************** In LinearScan::allocateRegisters()\n"); DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters")); // at start, nothing is active except for register args for (Interval& interval : intervals) { Interval* currentInterval = &interval; currentInterval->recentRefPosition = nullptr; currentInterval->isActive = false; if (currentInterval->isLocalVar) { LclVarDsc* varDsc = currentInterval->getLocalVar(compiler); if (varDsc->lvIsRegArg && currentInterval->firstRefPosition != nullptr) { currentInterval->isActive = true; } } } if (enregisterLocalVars) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars); unsigned largeVectorVarIndex = 0; while (largeVectorVarsIter.NextElem(&largeVectorVarIndex)) { Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex); lclVarInterval->isPartiallySpilled = false; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } resetRegState(); for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->recentRefPosition = nullptr; updateNextFixedRef(physRegRecord, physRegRecord->firstRefPosition); // Is this an incoming arg register? (Note that we don't, currently, consider reassigning // an incoming arg register as having spill cost.) 
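        // (Intervals for incoming register args were marked active in the interval loop
        // above, so their registers start out in use at the method-entry location.)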
Interval* interval = physRegRecord->assignedInterval; if (interval != nullptr) { #ifdef TARGET_ARM if ((interval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(reg)) #endif // TARGET_ARM { updateNextIntervalRef(reg, interval); updateSpillCost(reg, interval); setRegInUse(reg, interval->registerType); INDEBUG(registersToDump |= getRegMask(reg, interval->registerType)); } } else { clearNextIntervalRef(reg, physRegRecord->registerType); clearSpillCost(reg, physRegRecord->registerType); } } #ifdef DEBUG if (VERBOSE) { dumpRefPositions("BEFORE ALLOCATION"); dumpVarRefPositions("BEFORE ALLOCATION"); printf("\n\nAllocating Registers\n" "--------------------\n"); // Start with a small set of commonly used registers, so that we don't keep having to print a new title. // Include all the arg regs, as they may already have values assigned to them. registersToDump = LsraLimitSmallIntSet | LsraLimitSmallFPSet | RBM_ARG_REGS; dumpRegRecordHeader(); // Now print an empty "RefPosition", since we complete the dump of the regs at the beginning of the loop. printf(indentFormat, ""); } #endif // DEBUG BasicBlock* currentBlock = nullptr; LsraLocation prevLocation = MinLocation; regMaskTP regsToFree = RBM_NONE; regMaskTP delayRegsToFree = RBM_NONE; regMaskTP regsToMakeInactive = RBM_NONE; regMaskTP delayRegsToMakeInactive = RBM_NONE; regMaskTP copyRegsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; // This is the most recent RefPosition for which a register was allocated // - currently only used for DEBUG but maintained in non-debug, for clarity of code // (and will be optimized away because in non-debug spillAlways() unconditionally returns false) RefPosition* lastAllocatedRefPosition = nullptr; bool handledBlockEnd = false; for (RefPosition& refPositionIterator : refPositions) { RefPosition* currentRefPosition = &refPositionIterator; RefPosition* nextRefPosition = currentRefPosition->nextRefPosition; // TODO: Can we combine this with the freeing of registers below? It might // mess with the dump, since this was previously being done before the call below // to dumpRegRecords. regMaskTP tempRegsToMakeInactive = (regsToMakeInactive | delayRegsToMakeInactive); while (tempRegsToMakeInactive != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(tempRegsToMakeInactive); tempRegsToMakeInactive &= ~nextRegBit; regNumber nextReg = genRegNumFromMask(nextRegBit); RegRecord* regRecord = getRegisterRecord(nextReg); clearSpillCost(regRecord->regNum, regRecord->registerType); makeRegisterInactive(regRecord); } if (currentRefPosition->nodeLocation > prevLocation) { makeRegsAvailable(regsToMakeInactive); // TODO: Clean this up. We need to make the delayRegs inactive as well, but don't want // to mark them as free yet. regsToMakeInactive |= delayRegsToMakeInactive; regsToMakeInactive = delayRegsToMakeInactive; delayRegsToMakeInactive = RBM_NONE; } #ifdef DEBUG // Set the activeRefPosition to null until we're done with any boundary handling. activeRefPosition = nullptr; if (VERBOSE) { // We're really dumping the RegRecords "after" the previous RefPosition, but it's more convenient // to do this here, since there are a number of "continue"s in this loop. 
dumpRegRecords(); } #endif // DEBUG // This is the previousRefPosition of the current Referent, if any RefPosition* previousRefPosition = nullptr; Interval* currentInterval = nullptr; Referenceable* currentReferent = nullptr; RefType refType = currentRefPosition->refType; currentReferent = currentRefPosition->referent; if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->IsPhysRegRef() && !lastAllocatedRefPosition->getInterval()->isInternal && (RefTypeIsDef(lastAllocatedRefPosition->refType) || lastAllocatedRefPosition->getInterval()->isLocalVar)) { assert(lastAllocatedRefPosition->registerAssignment != RBM_NONE); RegRecord* regRecord = lastAllocatedRefPosition->getInterval()->assignedReg; unassignPhysReg(regRecord, lastAllocatedRefPosition); // Now set lastAllocatedRefPosition to null, so that we don't try to spill it again lastAllocatedRefPosition = nullptr; } // We wait to free any registers until we've completed all the // uses for the current node. // This avoids reusing registers too soon. // We free before the last true def (after all the uses & internal // registers), and then again at the beginning of the next node. // This is made easier by assigning two LsraLocations per node - one // for all the uses, internal registers & all but the last def, and // another for the final def (if any). LsraLocation currentLocation = currentRefPosition->nodeLocation; // Free at a new location. if (currentLocation > prevLocation) { // CopyRegs are simply made available - we don't want to make the associated interval inactive. makeRegsAvailable(copyRegsToFree); copyRegsToFree = RBM_NONE; regsInUseThisLocation = regsInUseNextLocation; regsInUseNextLocation = RBM_NONE; if ((regsToFree | delayRegsToFree) != RBM_NONE) { freeRegisters(regsToFree); if ((currentLocation > (prevLocation + 1)) && (delayRegsToFree != RBM_NONE)) { // We should never see a delayReg that is delayed until a Location that has no RefPosition // (that would be the RefPosition that it was supposed to interfere with). assert(!"Found a delayRegFree associated with Location with no reference"); // However, to be cautious for the Release build case, we will free them. freeRegisters(delayRegsToFree); delayRegsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; } regsToFree = delayRegsToFree; delayRegsToFree = RBM_NONE; #ifdef DEBUG // Validate the current state just after we've freed the registers. This ensures that any pending // freed registers will have had their state updated to reflect the intervals they were holding. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { regMaskTP regMask = genRegMask(reg); // If this isn't available or if it's still waiting to be freed (i.e. it was in // delayRegsToFree and so now it's in regsToFree), then skip it. if ((regMask & (availableIntRegs | availableFloatRegs) & ~regsToFree) == RBM_NONE) { continue; } RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { bool isAssignedReg = (assignedInterval->physReg == reg); RefPosition* recentRefPosition = assignedInterval->recentRefPosition; // If we have a copyReg or a moveReg, we might have assigned this register to an Interval, // but that isn't considered its assignedReg. if (recentRefPosition != nullptr) { if (recentRefPosition->refType == RefTypeExpUse) { // We don't update anything on these, as they're just placeholders to extend the // lifetime. 
continue; } // For copyReg or moveReg, we don't have anything further to assert. if (recentRefPosition->copyReg || recentRefPosition->moveReg) { continue; } assert(assignedInterval->isConstant == isRegConstant(reg, assignedInterval->registerType)); if (assignedInterval->isActive) { // If this is not the register most recently allocated, it must be from a copyReg, // it was placed there by the inVarToRegMap or it might be one of the upper vector // save/restore refPosition. // In either case it must be a lclVar. if (!isAssignedToInterval(assignedInterval, physRegRecord)) { // We'd like to assert that this was either set by the inVarToRegMap, or by // a copyReg, but we can't traverse backward to check for a copyReg, because // we only have recentRefPosition, and there may be a previous RefPosition // at the same Location with a copyReg. bool sanityCheck = assignedInterval->isLocalVar; // For upper vector interval, make sure it was one of the save/restore only. if (assignedInterval->IsUpperVector()) { sanityCheck |= (recentRefPosition->refType == RefTypeUpperVectorSave) || (recentRefPosition->refType == RefTypeUpperVectorRestore); } assert(sanityCheck); } if (isAssignedReg) { assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation()); assert(!isRegAvailable(reg, assignedInterval->registerType)); assert((recentRefPosition == nullptr) || (spillCost[reg] == getSpillWeight(physRegRecord))); } else { assert((nextIntervalRef[reg] == MaxLocation) || isRegBusy(reg, assignedInterval->registerType)); } } else { if ((assignedInterval->physReg == reg) && !assignedInterval->isConstant) { assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation()); } else { assert(nextIntervalRef[reg] == MaxLocation); assert(isRegAvailable(reg, assignedInterval->registerType)); assert(spillCost[reg] == 0); } } } } else { assert(isRegAvailable(reg, physRegRecord->registerType)); assert(!isRegConstant(reg, physRegRecord->registerType)); assert(nextIntervalRef[reg] == MaxLocation); assert(spillCost[reg] == 0); } LsraLocation thisNextFixedRef = physRegRecord->getNextRefLocation(); assert(nextFixedRef[reg] == thisNextFixedRef); #ifdef TARGET_ARM // If this is occupied by a double interval, skip the corresponding float reg. if ((assignedInterval != nullptr) && (assignedInterval->registerType == TYP_DOUBLE)) { reg = REG_NEXT(reg); } #endif } #endif // DEBUG } } prevLocation = currentLocation; // get previous refposition, then current refpos is the new previous if (currentReferent != nullptr) { previousRefPosition = currentReferent->recentRefPosition; currentReferent->recentRefPosition = currentRefPosition; } else { assert((refType == RefTypeBB) || (refType == RefTypeKillGCRefs)); } #ifdef DEBUG activeRefPosition = currentRefPosition; // For the purposes of register resolution, we handle the DummyDefs before // the block boundary - so the RefTypeBB is after all the DummyDefs. // However, for the purposes of allocation, we want to handle the block // boundary first, so that we can free any registers occupied by lclVars // that aren't live in the next block and make them available for the // DummyDefs. // If we've already handled the BlockEnd, but now we're seeing the RefTypeBB, // dump it now. 
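        // (For example, at a block boundary the RefPositions are ordered
        //    ... <DummyDefs for live-in vars> <RefTypeBB> <first node's RefPositions> ...
        // so the boundary is processed when the first DummyDef is seen, via
        // 'handledBlockEnd' below, before the RefTypeBB itself is reached.)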
if ((refType == RefTypeBB) && handledBlockEnd) { dumpNewBlock(currentBlock, currentRefPosition->nodeLocation); } #endif // DEBUG if (!handledBlockEnd && (refType == RefTypeBB || refType == RefTypeDummyDef)) { // Free any delayed regs (now in regsToFree) before processing the block boundary freeRegisters(regsToFree); regsToFree = RBM_NONE; regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; handledBlockEnd = true; curBBStartLocation = currentRefPosition->nodeLocation; if (currentBlock == nullptr) { currentBlock = startBlockSequence(); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, compiler->fgFirstBB)); } else { processBlockEndAllocation(currentBlock); currentBlock = moveToNextBlock(); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, currentBlock)); } } if (refType == RefTypeBB) { handledBlockEnd = false; continue; } if (refType == RefTypeKillGCRefs) { spillGCRefs(currentRefPosition); continue; } if (currentRefPosition->isPhysRegRef) { RegRecord* regRecord = currentRefPosition->getReg(); Interval* assignedInterval = regRecord->assignedInterval; updateNextFixedRef(regRecord, currentRefPosition->nextRefPosition); // If this is a FixedReg, disassociate any inactive constant interval from this register. // Otherwise, do nothing. if (refType == RefTypeFixedReg) { if (assignedInterval != nullptr && !assignedInterval->isActive && assignedInterval->isConstant) { clearConstantReg(regRecord->regNum, assignedInterval->registerType); regRecord->assignedInterval = nullptr; spillCost[regRecord->regNum] = 0; #ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE if (assignedInterval->registerType == TYP_DOUBLE) { RegRecord* otherRegRecord = findAnotherHalfRegRec(regRecord); assert(otherRegRecord->assignedInterval == assignedInterval); otherRegRecord->assignedInterval = nullptr; spillCost[otherRegRecord->regNum] = 0; } #endif // TARGET_ARM } regsInUseThisLocation |= currentRefPosition->registerAssignment; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition->assignedReg())); continue; } if (refType == RefTypeKill) { if (assignedInterval != nullptr) { unassignPhysReg(regRecord, assignedInterval->recentRefPosition); clearConstantReg(regRecord->regNum, assignedInterval->registerType); makeRegAvailable(regRecord->regNum, assignedInterval->registerType); } clearRegBusyUntilKill(regRecord->regNum); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum)); continue; } } // If this is an exposed use, do nothing - this is merely a placeholder to attempt to // ensure that a register is allocated for the full lifetime. The resolution logic // will take care of moving to the appropriate register if needed. 
if (refType == RefTypeExpUse) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_EXP_USE)); currentInterval = currentRefPosition->getInterval(); if (currentInterval->physReg != REG_NA) { updateNextIntervalRef(currentInterval->physReg, currentInterval); } continue; } regNumber assignedRegister = REG_NA; assert(currentRefPosition->isIntervalRef()); currentInterval = currentRefPosition->getInterval(); assert(currentInterval != nullptr); assignedRegister = currentInterval->physReg; // Identify the special cases where we decide up-front not to allocate bool allocate = true; bool didDump = false; if (refType == RefTypeParamDef || refType == RefTypeZeroInit) { if (nextRefPosition == nullptr) { // If it has no actual references, mark it as "lastUse"; since they're not actually part // of any flow they won't have been marked during dataflow. Otherwise, if we allocate a // register we won't unassign it. INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ZERO_REF, currentInterval)); currentRefPosition->lastUse = true; } LclVarDsc* varDsc = currentInterval->getLocalVar(compiler); assert(varDsc != nullptr); assert(!blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn || currentInterval->isWriteThru); if (blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn || blockInfo[compiler->fgFirstBB->bbNum].hasEHPred) { allocate = false; } else if (refType == RefTypeParamDef && (varDsc->lvRefCntWtd() <= BB_UNITY_WEIGHT) && (!currentRefPosition->lastUse || (currentInterval->physReg == REG_STK))) { // If this is a low ref-count parameter, and either it is used (def is not the last use) or it's // passed on the stack, don't allocate a register. // Note that if this is an unused register parameter we don't want to set allocate to false because that // will cause us to allocate stack space to spill it. allocate = false; } else if ((currentInterval->physReg == REG_STK) && nextRefPosition->treeNode->OperIs(GT_BITCAST)) { // In the case of ABI mismatches, avoid allocating a register only to have to immediately move // it to a different register file. allocate = false; } else if ((currentInterval->isWriteThru) && (refType == RefTypeZeroInit)) { // For RefTypeZeroInit which is a write thru, there is no need to allocate register // right away. It can be assigned when actually definition occurs. // In future, see if avoiding allocation for RefTypeZeroInit gives any benefit in general. allocate = false; } if (!allocate) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, currentInterval)); didDump = true; setIntervalAsSpilled(currentInterval); if (assignedRegister != REG_NA) { clearNextIntervalRef(assignedRegister, currentInterval->registerType); clearSpillCost(assignedRegister, currentInterval->registerType); makeRegAvailable(assignedRegister, currentInterval->registerType); } } } #ifdef FEATURE_SIMD #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE else if (currentInterval->isUpperVector) { // This is a save or restore of the upper half of a large vector lclVar. 
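            // (On targets where the callee-save convention preserves only the lower half
            // of a large vector register, the upper half must be saved across calls and
            // restored before the next use; these RefPositions model those points.)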
            Interval* lclVarInterval = currentInterval->relatedInterval;
            assert(lclVarInterval->isLocalVar);
            if (refType == RefTypeUpperVectorSave)
            {
                if ((lclVarInterval->physReg == REG_NA) ||
                    (lclVarInterval->isPartiallySpilled && (currentInterval->physReg == REG_STK)))
                {
                    allocate = false;
                }
                else
                {
                    lclVarInterval->isPartiallySpilled = true;
                }
            }
            else if (refType == RefTypeUpperVectorRestore)
            {
                assert(currentInterval->isUpperVector);
                if (lclVarInterval->isPartiallySpilled)
                {
                    lclVarInterval->isPartiallySpilled = false;
                }
                else
                {
                    allocate = false;
                }
            }
        }
        else if (refType == RefTypeUpperVectorSave)
        {
            assert(!currentInterval->isLocalVar);
            // Note that this case looks a lot like the case below, but in this case we need to spill
            // at the previous RefPosition.
            // We may want to consider allocating two callee-save registers for this case, but it happens rarely
            // enough that it may not warrant the additional complexity.
            if (assignedRegister != REG_NA)
            {
                unassignPhysReg(getRegisterRecord(assignedRegister), currentInterval->firstRefPosition);
                INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
            }
            currentRefPosition->registerAssignment = RBM_NONE;
            continue;
        }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#endif // FEATURE_SIMD

        if (allocate == false)
        {
            if (assignedRegister != REG_NA)
            {
                unassignPhysReg(getRegisterRecord(assignedRegister), currentRefPosition);
            }
            else if (!didDump)
            {
                INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
                didDump = true;
            }
            currentRefPosition->registerAssignment = RBM_NONE;
            continue;
        }

        if (currentInterval->isSpecialPutArg)
        {
            assert(!currentInterval->isLocalVar);
            Interval* srcInterval = currentInterval->relatedInterval;
            assert(srcInterval != nullptr && srcInterval->isLocalVar);
            if (refType == RefTypeDef)
            {
                assert(srcInterval->recentRefPosition->nodeLocation == currentLocation - 1);
                RegRecord* physRegRecord = srcInterval->assignedReg;

                // For a putarg_reg to be special, its next use location has to be the same
                // as the fixed reg's next kill location. Otherwise, if the source lcl var's next use
                // came after the kill of the fixed reg but before the putarg_reg's next use, the fixed
                // reg's kill would spill the source but not the putarg_reg if it were treated as special.
                if (srcInterval->isActive &&
                    genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment &&
                    currentInterval->getNextRefLocation() == nextFixedRef[srcInterval->physReg])
                {
                    assert(physRegRecord->regNum == srcInterval->physReg);

                    // A special putarg_reg acts as a pass-thru, since both the source lcl var
                    // and the putarg_reg are allocated the same register. The physical RegRecord
                    // continues to point to the source lcl var's interval rather than to the
                    // putarg_reg's interval. So if the register allocated to the source lcl var
                    // were spilled (to reallocate it to another tree node) before its use at the
                    // call node, it would be the lcl var that gets spilled rather than the
                    // putarg_reg, since the physical RegRecord points to the lcl var's interval.
                    // As a result, the arg register would be trashed, leading to bad codegen.
                    // The assumption here is that the source lcl var of a special putarg_reg
                    // doesn't get spilled and re-allocated prior to its use at the call node.
                    // This is ensured by marking the physical RegRecord as busy until the next kill.
setRegBusyUntilKill(srcInterval->physReg, srcInterval->registerType); } else { currentInterval->isSpecialPutArg = false; } } // If this is still a SpecialPutArg, continue; if (currentInterval->isSpecialPutArg) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval, currentRefPosition->assignedReg())); continue; } } if (assignedRegister == REG_NA && RefTypeIsUse(refType)) { currentRefPosition->reload = true; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, currentInterval, assignedRegister)); } regMaskTP assignedRegBit = RBM_NONE; bool isInRegister = false; if (assignedRegister != REG_NA) { isInRegister = true; assignedRegBit = genRegMask(assignedRegister); if (!currentInterval->isActive) { // If this is a use, it must have started the block on the stack, but the register // was available for use so we kept the association. if (RefTypeIsUse(refType)) { assert(enregisterLocalVars); assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK && previousRefPosition->nodeLocation <= curBBStartLocation); isInRegister = false; } else { currentInterval->isActive = true; setRegInUse(assignedRegister, currentInterval->registerType); updateSpillCost(assignedRegister, currentInterval); } updateNextIntervalRef(assignedRegister, currentInterval); } assert(currentInterval->assignedReg != nullptr && currentInterval->assignedReg->regNum == assignedRegister && currentInterval->assignedReg->assignedInterval == currentInterval); } if (previousRefPosition != nullptr) { assert(previousRefPosition->nextRefPosition == currentRefPosition); assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment || currentRefPosition->outOfOrder || previousRefPosition->copyReg || previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); } else if (assignedRegister != REG_NA) { // Handle the case where this is a preassigned register (i.e. parameter). // We don't want to actually use the preassigned register if it's not // going to cover the lifetime - but we had to preallocate it to ensure // that it remained live. // TODO-CQ: At some point we may want to refine the analysis here, in case // it might be beneficial to keep it in this reg for PART of the lifetime if (currentInterval->isLocalVar) { regMaskTP preferences = currentInterval->registerPreferences; bool keepAssignment = true; bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE; // Will the assigned register cover the lifetime? If not, does it at least // meet the preferences for the next RefPosition? LsraLocation nextPhysRegLocation = nextFixedRef[assignedRegister]; if (nextPhysRegLocation <= currentInterval->lastRefPosition->nodeLocation) { // Check to see if the existing assignment matches the preferences (e.g. callee save registers) // and ensure that the next use of this localVar does not occur after the nextPhysRegRefPos // There must be a next RefPosition, because we know that the Interval extends beyond the // nextPhysRegRefPos. assert(nextRefPosition != nullptr); if (!matchesPreferences || nextPhysRegLocation < nextRefPosition->nodeLocation) { keepAssignment = false; } else if ((nextRefPosition->registerAssignment != assignedRegBit) && (nextPhysRegLocation <= nextRefPosition->getRefEndLocation())) { keepAssignment = false; } } else if (refType == RefTypeParamDef && !matchesPreferences) { // Don't use the register, even if available, if it doesn't match the preferences. 
// Note that this case is only for ParamDefs, for which we haven't yet taken preferences // into account (we've just automatically got the initial location). In other cases, // we would already have put it in a preferenced register, if it was available. // TODO-CQ: Consider expanding this to check availability - that would duplicate // code here, but otherwise we may wind up in this register anyway. keepAssignment = false; } if (keepAssignment == false) { RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg); currentRefPosition->registerAssignment = allRegs(currentInterval->registerType); currentRefPosition->isFixedRegRef = false; unassignPhysRegNoSpill(physRegRecord); // If the preferences are currently set to just this register, reset them to allRegs // of the appropriate type (just as we just reset the registerAssignment for this // RefPosition. // Otherwise, simply remove this register from the preferences, if it's there. if (currentInterval->registerPreferences == assignedRegBit) { currentInterval->registerPreferences = currentRefPosition->registerAssignment; } else { currentInterval->registerPreferences &= ~assignedRegBit; } assignedRegister = REG_NA; assignedRegBit = RBM_NONE; } } } if (assignedRegister != REG_NA) { RegRecord* physRegRecord = getRegisterRecord(assignedRegister); assert((assignedRegBit == currentRefPosition->registerAssignment) || (physRegRecord->assignedInterval == currentInterval) || !isRegInUse(assignedRegister, currentInterval->registerType)); if (conflictingFixedRegReference(assignedRegister, currentRefPosition)) { // We may have already reassigned the register to the conflicting reference. // If not, we need to unassign this interval. if (physRegRecord->assignedInterval == currentInterval) { unassignPhysRegNoSpill(physRegRecord); physRegRecord->assignedInterval = nullptr; clearConstantReg(assignedRegister, currentInterval->registerType); } currentRefPosition->moveReg = true; assignedRegister = REG_NA; currentRefPosition->registerAssignment &= ~assignedRegBit; setIntervalAsSplit(currentInterval); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister)); } else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0) { currentRefPosition->registerAssignment = assignedRegBit; if (!currentInterval->isActive) { // If we've got an exposed use at the top of a block, the // interval might not have been active. Otherwise if it's a use, // the interval must be active. if (refType == RefTypeDummyDef) { currentInterval->isActive = true; assert(getRegisterRecord(assignedRegister)->assignedInterval == currentInterval); } else { currentRefPosition->reload = true; } } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister)); } else { // It's already in a register, but not one we need. 
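                // For a use, we satisfy the reference with a copyReg: the interval keeps
                // its current home register, and only this reference uses the copy. (For
                // tree temps this becomes a moveReg below, since they have no home
                // register assignment to preserve.)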
if (!RefTypeIsDef(currentRefPosition->refType)) { regNumber copyReg = assignCopyReg(currentRefPosition); lastAllocatedRefPosition = currentRefPosition; bool unassign = false; if (currentInterval->isWriteThru) { if (currentRefPosition->refType == RefTypeDef) { currentRefPosition->writeThru = true; } if (!currentRefPosition->lastUse) { if (currentRefPosition->spillAfter) { unassign = true; } } } regMaskTP copyRegMask = getRegMask(copyReg, currentInterval->registerType); regMaskTP assignedRegMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= copyRegMask | assignedRegMask; if (currentRefPosition->lastUse) { if (currentRefPosition->delayRegFree) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval, assignedRegister)); delayRegsToFree |= copyRegMask | assignedRegMask; regsInUseNextLocation |= copyRegMask | assignedRegMask; } else { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister)); regsToFree |= copyRegMask | assignedRegMask; } } else { copyRegsToFree |= copyRegMask; if (currentRefPosition->delayRegFree) { regsInUseNextLocation |= copyRegMask | assignedRegMask; } } // If this is a tree temp (non-localVar) interval, we will need an explicit move. // Note: In theory a moveReg should cause the Interval to now have the new reg as its // assigned register. However, that's not currently how this works. // If we ever actually move lclVar intervals instead of copying, this will need to change. if (!currentInterval->isLocalVar) { currentRefPosition->moveReg = true; currentRefPosition->copyReg = false; } clearNextIntervalRef(copyReg, currentInterval->registerType); clearSpillCost(copyReg, currentInterval->registerType); updateNextIntervalRef(assignedRegister, currentInterval); updateSpillCost(assignedRegister, currentInterval); continue; } else { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NEEDS_NEW_REG, nullptr, assignedRegister)); regsToFree |= getRegMask(assignedRegister, currentInterval->registerType); // We want a new register, but we don't want this to be considered a spill. assignedRegister = REG_NA; if (physRegRecord->assignedInterval == currentInterval) { unassignPhysRegNoSpill(physRegRecord); } } } } if (assignedRegister == REG_NA) { if (currentRefPosition->RegOptional()) { // We can avoid allocating a register if it is a last use requiring a reload. if (currentRefPosition->lastUse && currentRefPosition->reload) { allocate = false; } else if (currentInterval->isWriteThru) { // Don't allocate if the next reference is in a cold block. if (nextRefPosition == nullptr || (nextRefPosition->nodeLocation >= firstColdLoc)) { allocate = false; } } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH) // We can also avoid allocating a register (in fact we don't want to) if we have // an UpperVectorRestore on xarch where the value is on the stack. if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA)) { assert(currentRefPosition->regOptional); allocate = false; } #endif #ifdef DEBUG // Under stress mode, don't allocate registers to RegOptional RefPositions. if (allocate && regOptionalNoAlloc()) { allocate = false; } #endif } RegisterScore registerScore = NONE; if (allocate) { // Allocate a register, if we must, or if it is profitable to do so. // If we have a fixed reg requirement, and the interval is inactive in another register, // unassign that register. 
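                    // (A fixed register requirement arises from instructions that demand a
                    // specific register, e.g. variable-count shifts requiring RCX on xarch.)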
if (currentRefPosition->isFixedRegRef && !currentInterval->isActive && (currentInterval->assignedReg != nullptr) && (currentInterval->assignedReg->assignedInterval == currentInterval) && (genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition->registerAssignment)) { unassignPhysReg(currentInterval->assignedReg, nullptr); } assignedRegister = allocateReg(currentInterval, currentRefPosition DEBUG_ARG(&registerScore)); } // If no register was found, this RefPosition must not require a register. if (assignedRegister == REG_NA) { assert(currentRefPosition->RegOptional()); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval)); currentRefPosition->registerAssignment = RBM_NONE; currentRefPosition->reload = false; currentInterval->isActive = false; setIntervalAsSpilled(currentInterval); } #ifdef DEBUG else { if (VERBOSE) { if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) && currentRefPosition->treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, currentInterval, assignedRegister, currentBlock, registerScore); } else { dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, currentInterval, assignedRegister, currentBlock, registerScore); } } } #endif // DEBUG if (refType == RefTypeDummyDef && assignedRegister != REG_NA) { setInVarRegForBB(curBBNum, currentInterval->varNum, assignedRegister); } // If we allocated a register, and this is a use of a spilled value, // it should have been marked for reload above. if (assignedRegister != REG_NA && RefTypeIsUse(refType) && !isInRegister) { assert(currentRefPosition->reload); } } // If we allocated a register, record it if (assignedRegister != REG_NA) { assignedRegBit = genRegMask(assignedRegister); regMaskTP regMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= regMask; if (currentRefPosition->delayRegFree) { regsInUseNextLocation |= regMask; } currentRefPosition->registerAssignment = assignedRegBit; currentInterval->physReg = assignedRegister; regsToFree &= ~regMask; // we'll set it again later if it's dead // If this interval is dead, free the register. // The interval could be dead if this is a user variable, or if the // node is being evaluated for side effects, or a call whose result // is not used, etc. // If this is an UpperVector we'll neither free it nor preference it // (it will be freed when it is used). bool unassign = false; if (!currentInterval->IsUpperVector()) { if (currentInterval->isWriteThru) { if (currentRefPosition->refType == RefTypeDef) { currentRefPosition->writeThru = true; } if (!currentRefPosition->lastUse) { if (currentRefPosition->spillAfter) { unassign = true; } } } if (currentRefPosition->lastUse || currentRefPosition->nextRefPosition == nullptr) { assert(currentRefPosition->isIntervalRef()); // If this isn't a final use, we'll mark the register as available, but keep the association. if ((refType != RefTypeExpUse) && (currentRefPosition->nextRefPosition == nullptr)) { unassign = true; } else { if (currentRefPosition->delayRegFree) { delayRegsToMakeInactive |= regMask; } else { regsToMakeInactive |= regMask; } // TODO-Cleanup: this makes things consistent with previous, and will enable preferences // to be propagated, but it seems less than ideal. currentInterval->isActive = false; } // Update the register preferences for the relatedInterval, if this is 'preferencedToDef'. 
// Don't propagate to subsequent relatedIntervals; that will happen as they are allocated, and we // don't know yet whether the register will be retained. if (currentInterval->relatedInterval != nullptr) { currentInterval->relatedInterval->updateRegisterPreferences(assignedRegBit); } } if (unassign) { if (currentRefPosition->delayRegFree) { delayRegsToFree |= regMask; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED)); } else { regsToFree |= regMask; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE)); } } } if (!unassign) { updateNextIntervalRef(assignedRegister, currentInterval); updateSpillCost(assignedRegister, currentInterval); } } lastAllocatedRefPosition = currentRefPosition; } #ifdef JIT32_GCENCODER // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer // in the same register for the entire method, or keep it on the stack. Rather than imposing this constraint // as we allocate, we will force all refs to the stack if it is split or spilled. if (enregisterLocalVars && compiler->lvaKeepAliveAndReportThis()) { LclVarDsc* thisVarDsc = compiler->lvaGetDesc(compiler->info.compThisArg); if (thisVarDsc->lvLRACandidate) { Interval* interval = getIntervalForLocalVar(thisVarDsc->lvVarIndex); if (interval->isSplit) { // We'll have to spill this. setIntervalAsSpilled(interval); } if (interval->isSpilled) { unsigned prevBBNum = 0; for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition) { // For the resolution phase, we need to ensure that any block with exposed uses has the // incoming reg for 'this' as REG_STK. if (RefTypeIsUse(ref->refType) && (ref->bbNum != prevBBNum)) { VarToRegMap inVarToRegMap = getInVarToRegMap(ref->bbNum); setVarReg(inVarToRegMap, thisVarDsc->lvVarIndex, REG_STK); } if (ref->RegOptional()) { ref->registerAssignment = RBM_NONE; ref->reload = false; ref->spillAfter = false; } switch (ref->refType) { case RefTypeDef: if (ref->registerAssignment != RBM_NONE) { ref->spillAfter = true; } break; case RefTypeUse: if (ref->registerAssignment != RBM_NONE) { ref->reload = true; ref->spillAfter = true; ref->copyReg = false; ref->moveReg = false; } break; default: break; } prevBBNum = ref->bbNum; } } } } #endif // JIT32_GCENCODER // Free registers to clear associated intervals for resolution phase CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (getLsraExtendLifeTimes()) { // If we have extended lifetimes, we need to make sure all the registers are freed. for (size_t regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++) { RegRecord& regRecord = physRegs[regNumIndex]; Interval* interval = regRecord.assignedInterval; if (interval != nullptr) { interval->isActive = false; unassignPhysReg(&regRecord, nullptr); } } } else #endif // DEBUG { freeRegisters(regsToFree | delayRegsToFree); } #ifdef DEBUG if (VERBOSE) { // Dump the RegRecords after the last RefPosition is handled. dumpRegRecords(); printf("\n"); dumpRefPositions("AFTER ALLOCATION"); dumpVarRefPositions("AFTER ALLOCATION"); // Dump the intervals that remain active printf("Active intervals at end of allocation:\n"); // We COULD just reuse the intervalIter from above, but ArrayListIterator doesn't // provide a Reset function (!) 
- we'll probably replace this so don't bother // adding it for (Interval& interval : intervals) { if (interval.isActive) { printf("Active "); interval.dump(); } } printf("\n"); } #endif // DEBUG } //----------------------------------------------------------------------------- // updateAssignedInterval: Update assigned interval of register. // // Arguments: // reg - register to be updated // interval - interval to be assigned // regType - register type // // Return Value: // None // // Note: // For ARM32, two float registers consisting a double register are updated // together when "regType" is TYP_DOUBLE. // void LinearScan::updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType) { #ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE. Interval* oldAssignedInterval = reg->assignedInterval; regNumber doubleReg = REG_NA; if (regType == TYP_DOUBLE) { RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg); doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum; anotherHalfReg->assignedInterval = interval; } else if ((oldAssignedInterval != nullptr) && (oldAssignedInterval->registerType == TYP_DOUBLE)) { RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg); doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum; anotherHalfReg->assignedInterval = nullptr; } if (doubleReg != REG_NA) { clearNextIntervalRef(doubleReg, TYP_DOUBLE); clearSpillCost(doubleReg, TYP_DOUBLE); clearConstantReg(doubleReg, TYP_DOUBLE); } #endif reg->assignedInterval = interval; if (interval != nullptr) { setRegInUse(reg->regNum, interval->registerType); if (interval->isConstant) { setConstantReg(reg->regNum, interval->registerType); } else { clearConstantReg(reg->regNum, interval->registerType); } updateNextIntervalRef(reg->regNum, interval); updateSpillCost(reg->regNum, interval); } else { clearNextIntervalRef(reg->regNum, reg->registerType); clearSpillCost(reg->regNum, reg->registerType); } } //----------------------------------------------------------------------------- // updatePreviousInterval: Update previous interval of register. // // Arguments: // reg - register to be updated // interval - interval to be assigned // regType - register type // // Return Value: // None // // Assumptions: // For ARM32, when "regType" is TYP_DOUBLE, "reg" should be a even-numbered // float register, i.e. lower half of double register. // // Note: // For ARM32, two float registers consisting a double register are updated // together when "regType" is TYP_DOUBLE. // void LinearScan::updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType) { reg->previousInterval = interval; #ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE if (regType == TYP_DOUBLE) { RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg); anotherHalfReg->previousInterval = interval; } #endif } //----------------------------------------------------------------------------- // writeLocalReg: Write the register assignment for a GT_LCL_VAR node. // // Arguments: // lclNode - The GT_LCL_VAR node // varNum - The variable number for the register // reg - The assigned register // // Return Value: // None // // Note: // For a multireg node, 'varNum' will be the field local for the given register. 
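//    For example (hypothetical local numbering): if a promoted struct's field locals
//    start at V03 (lvFieldLclStart == 3), writing a register for field local V05
//    updates the node's register at regIndex 5 - 3 == 2.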
// void LinearScan::writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg) { assert((lclNode->GetLclNum() == varNum) == !lclNode->IsMultiReg()); if (lclNode->GetLclNum() == varNum) { lclNode->SetRegNum(reg); } else { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode); assert(parentVarDsc->lvPromoted); unsigned regIndex = varNum - parentVarDsc->lvFieldLclStart; assert(regIndex < MAX_MULTIREG_COUNT); lclNode->SetRegNumByIdx(reg, regIndex); } } //----------------------------------------------------------------------------- // LinearScan::resolveLocalRef // Description: // Update the graph for a local reference. // Also, track the register (if any) that is currently occupied. // Arguments: // treeNode: The lclVar that's being resolved // currentRefPosition: the RefPosition associated with the treeNode // // Details: // This method is called for each local reference, during the resolveRegisters // phase of LSRA. It is responsible for keeping the following in sync: // - varDsc->GetRegNum() (and GetOtherReg()) contain the unique register location. // If it is not in the same register through its lifetime, it is set to REG_STK. // - interval->physReg is set to the assigned register // (i.e. at the code location which is currently being handled by resolveRegisters()) // - interval->isActive is true iff the interval is live and occupying a register // - interval->isSpilled should have already been set to true if the interval is EVER spilled // - interval->isSplit is set to true if the interval does not occupy the same // register throughout the method // - RegRecord->assignedInterval points to the interval which currently occupies // the register // - For each lclVar node: // - GetRegNum()/gtRegPair is set to the currently allocated register(s). // - GTF_SPILLED is set on a use if it must be reloaded prior to use. // - GTF_SPILL is set if it must be spilled after use. // // A copyReg is an ugly case where the variable must be in a specific (fixed) register, // but it currently resides elsewhere. The register allocator must track the use of the // fixed register, but it marks the lclVar node with the register it currently lives in // and the code generator does the necessary move. // // Before beginning, the varDsc for each parameter must be set to its initial location. // // NICE: Consider tracking whether an Interval is always in the same location (register/stack) // in which case it will require no resolution. // void LinearScan::resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition) { assert((block == nullptr) == (treeNode == nullptr)); assert(enregisterLocalVars); // Is this a tracked local? Or just a register allocated for loading // a non-tracked one? Interval* interval = currentRefPosition->getInterval(); assert(interval->isLocalVar); interval->recentRefPosition = currentRefPosition; LclVarDsc* varDsc = interval->getLocalVar(compiler); // NOTE: we set the LastUse flag here unless we are extending lifetimes, in which case we write // this bit in checkLastUses. This is a bit of a hack, but is necessary because codegen requires // accurate last use info that is not reflected in the lastUse bit on ref positions when we are extending // lifetimes. See also the comments in checkLastUses. 
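// For example (illustrative only): for a multi-reg use with getMultiRegIdx() == 1, SetLastUse(1) marks only that register position as a last use; the other positions of the node keep their own last-use bits.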
if ((treeNode != nullptr) && !extendLifetimes()) { if (currentRefPosition->lastUse) { treeNode->SetLastUse(currentRefPosition->getMultiRegIdx()); } else { treeNode->ClearLastUse(currentRefPosition->getMultiRegIdx()); } if ((currentRefPosition->registerAssignment != RBM_NONE) && (interval->physReg == REG_NA) && currentRefPosition->RegOptional() && currentRefPosition->lastUse && (currentRefPosition->refType == RefTypeUse)) { // This can happen if the incoming location for the block was changed from a register to the stack // during resolution. In this case we're better off making it contained. assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK); currentRefPosition->registerAssignment = RBM_NONE; writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA); } } if (currentRefPosition->registerAssignment == RBM_NONE) { assert(currentRefPosition->RegOptional()); assert(interval->isSpilled); varDsc->SetRegNum(REG_STK); if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval) { updateAssignedInterval(interval->assignedReg, nullptr, interval->registerType); } interval->assignedReg = nullptr; interval->physReg = REG_NA; interval->isActive = false; // Set this as contained if it is not a multi-reg (we could potentially mark it as contained // if all uses are from spill, but that adds complexity.) if ((currentRefPosition->refType == RefTypeUse) && !treeNode->IsMultiReg()) { assert(treeNode != nullptr); treeNode->SetContained(); } return; } // In most cases, assigned and home registers will be the same. // The exception is the copyReg case, where we've assigned a register // for a specific purpose, but will be keeping the register assignment regNumber assignedReg = currentRefPosition->assignedReg(); regNumber homeReg = assignedReg; // Undo any previous association with a physical register, UNLESS this // is a copyReg if (!currentRefPosition->copyReg) { regNumber oldAssignedReg = interval->physReg; if (oldAssignedReg != REG_NA && assignedReg != oldAssignedReg) { RegRecord* oldRegRecord = getRegisterRecord(oldAssignedReg); if (oldRegRecord->assignedInterval == interval) { updateAssignedInterval(oldRegRecord, nullptr, interval->registerType); } } } if (currentRefPosition->refType == RefTypeUse && !currentRefPosition->reload) { // Was this spilled after our predecessor was scheduled? if (interval->physReg == REG_NA) { assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK); currentRefPosition->reload = true; } } bool reload = currentRefPosition->reload; bool spillAfter = currentRefPosition->spillAfter; bool writeThru = currentRefPosition->writeThru; // In the reload case we either: // - Set the register to REG_STK if it will be referenced only from the home location, or // - Set the register to the assigned register and set GTF_SPILLED if it must be loaded into a register. if (reload) { assert(currentRefPosition->refType != RefTypeDef); assert(interval->isSpilled); varDsc->SetRegNum(REG_STK); if (!spillAfter) { interval->physReg = assignedReg; } // If there is no treeNode, this must be a RefTypeExpUse, in // which case we did the reload already if (treeNode != nullptr) { treeNode->gtFlags |= GTF_SPILLED; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx()); } if (spillAfter) { if (currentRefPosition->RegOptional()) { // This is a use of a lclVar that is flagged as reg-optional // by lower/codegen and marked for both reload and spillAfter.
// In this case we can avoid unnecessary reload and spill // by setting reg on lclVar to REG_STK and reg on tree node // to REG_NA. Codegen will generate the code by considering // it as a contained memory operand. // // Note that varDsc->GetRegNum() is already set to REG_STK above. interval->physReg = REG_NA; writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA); treeNode->gtFlags &= ~GTF_SPILLED; treeNode->SetContained(); // We don't support RegOptional for multi-reg localvars. assert(!treeNode->IsMultiReg()); } else { treeNode->gtFlags |= GTF_SPILL; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } } } } else { assert(currentRefPosition->refType == RefTypeExpUse); } } else if (spillAfter && !RefTypeIsUse(currentRefPosition->refType) && (treeNode != nullptr) && (!treeNode->IsMultiReg() || treeNode->gtGetOp1()->IsMultiRegNode())) { // In the case of a pure def, don't bother spilling - just assign it to the // stack. However, we need to remember that it was spilled. // We can't do this in the case of a multi-reg node with a non-multireg source as // we need the register to extract into. assert(interval->isSpilled); varDsc->SetRegNum(REG_STK); interval->physReg = REG_NA; writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA); } else // Not reload and Not pure-def that's spillAfter { if (currentRefPosition->copyReg || currentRefPosition->moveReg) { // For a copyReg or moveReg, we have two cases: // - In the first case, we have a fixedReg - i.e. a register which the code // generator is constrained to use. // The code generator will generate the appropriate move to meet the requirement. // - In the second case, we were forced to use a different register because of // interference (or JitStressRegs). // In this case, we generate a GT_COPY. // In either case, we annotate the treeNode with the register in which the value // currently lives. For moveReg, the homeReg is the new register (as assigned above). // But for copyReg, the homeReg remains unchanged. assert(treeNode != nullptr); writeLocalReg(treeNode->AsLclVar(), interval->varNum, interval->physReg); if (currentRefPosition->copyReg) { homeReg = interval->physReg; } else { assert(interval->isSplit); interval->physReg = assignedReg; } if (!currentRefPosition->isFixedRegRef || currentRefPosition->moveReg) { // This is the second case, where we need to generate a copy insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition); } } else { interval->physReg = assignedReg; if (!interval->isSpilled && !interval->isSplit) { if (varDsc->GetRegNum() != REG_STK) { // If the register assignments don't match, then this interval is split. if (varDsc->GetRegNum() != assignedReg) { setIntervalAsSplit(interval); varDsc->SetRegNum(REG_STK); } } else { varDsc->SetRegNum(assignedReg); } } } if (spillAfter) { if (treeNode != nullptr) { treeNode->gtFlags |= GTF_SPILL; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } } assert(interval->isSpilled); interval->physReg = REG_NA; varDsc->SetRegNum(REG_STK); } if (writeThru && (treeNode != nullptr)) { // This is a def of a write-thru EH var (only defs are marked 'writeThru'). treeNode->gtFlags |= GTF_SPILL; // We also mark writeThru defs that are not last-use with GTF_SPILLED to indicate that they are conceptually // spilled and immediately "reloaded", i.e. the register remains live.
// Note that we can have a "last use" write that has no exposed uses in the standard // (non-eh) control flow, but that may be used on an exception path. Hence the need // to retain these defs, and to ensure that they write. if (!currentRefPosition->lastUse) { treeNode->gtFlags |= GTF_SPILLED; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx()); } } } if (currentRefPosition->singleDefSpill && (treeNode != nullptr)) { // This is the first (and only) def of a single-def var (only defs are marked 'singleDefSpill'). // Mark it as GTF_SPILL, so it is spilled immediately to the stack at definition and // GTF_SPILLED, so the variable stays live in the register. // // TODO: This approach would still create the resolution moves but during codegen, will check for // `lvSpillAtSingleDef` to decide whether to generate spill or not. In future, see if there is some // better way to avoid resolution moves, perhaps by updating the varDsc->SetRegNum(REG_STK) in this // method? treeNode->gtFlags |= GTF_SPILL; treeNode->gtFlags |= GTF_SPILLED; if (treeNode->IsMultiReg()) { treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx()); } varDsc->lvSpillAtSingleDef = true; } } // Update the physRegRecord for the register, so that we know what vars are in // regs at the block boundaries RegRecord* physRegRecord = getRegisterRecord(homeReg); if (spillAfter || currentRefPosition->lastUse) { interval->isActive = false; interval->assignedReg = nullptr; interval->physReg = REG_NA; updateAssignedInterval(physRegRecord, nullptr, interval->registerType); } else { interval->isActive = true; interval->assignedReg = physRegRecord; updateAssignedInterval(physRegRecord, interval, interval->registerType); } } void LinearScan::writeRegisters(RefPosition* currentRefPosition, GenTree* tree) { lsraAssignRegToTree(tree, currentRefPosition->assignedReg(), currentRefPosition->getMultiRegIdx()); } //------------------------------------------------------------------------ // insertCopyOrReload: Insert a copy in the case where a tree node value must be moved // to a different register at the point of use (GT_COPY), or it is reloaded to a different register // than the one it was spilled from (GT_RELOAD). // // Arguments: // block - basic block in which GT_COPY/GT_RELOAD is inserted. // tree - This is the node to copy or reload. // Insert copy or reload node between this node and its parent. // multiRegIdx - register position of tree node for which copy or reload is needed. // refPosition - The RefPosition at which copy or reload will take place. // // Notes: // The GT_COPY or GT_RELOAD will be inserted in the proper spot in execution order where the reload is to occur. 
// // For example, for this tree (numbers are execution order, lower is earlier and higher is later): // // +---------+----------+ // | GT_ADD (3) | // +---------+----------+ // | // / '\' // / '\' // / '\' // +-------------------+ +----------------------+ // | x (1) | "tree" | y (2) | // +-------------------+ +----------------------+ // // generate this tree: // // +---------+----------+ // | GT_ADD (4) | // +---------+----------+ // | // / '\' // / '\' // / '\' // +-------------------+ +----------------------+ // | GT_RELOAD (3) | | y (2) | // +-------------------+ +----------------------+ // | // +-------------------+ // | x (1) | "tree" // +-------------------+ // // Note in particular that the GT_RELOAD node gets inserted in execution order immediately before the parent of "tree", // which seems a bit weird since normally a node's parent (in this case, the parent of "x", GT_RELOAD in the "after" // picture) immediately follows all of its children (that is, normally the execution ordering is postorder). // The ordering must be this weird "out of normal order" way because the "x" node is being spilled, probably // because the expression in the tree represented above by "y" has high register requirements. We don't want // to reload immediately, of course. So we put GT_RELOAD where the reload should actually happen. // // Note that GT_RELOAD is required when we reload to a different register than the one we spilled to. It can also be // used if we reload to the same register. Normally, though, in that case we just mark the node with GTF_SPILLED, // and the unspilling code automatically reuses the same register, and does the reload when it notices that flag // when considering a node's operands. // void LinearScan::insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition) { LIR::Range& blockRange = LIR::AsRange(block); LIR::Use treeUse; bool foundUse = blockRange.TryGetUse(tree, &treeUse); assert(foundUse); GenTree* parent = treeUse.User(); genTreeOps oper; if (refPosition->reload) { oper = GT_RELOAD; } else { oper = GT_COPY; INTRACK_STATS(updateLsraStat(STAT_COPY_REG, block->bbNum)); } // If the parent is a reload/copy node, then tree must be a multi-reg node // that has already had one of its registers spilled. // It is possible that one of its RefTypeDef positions got spilled and the next // use of it requires it to be in a different register. // // In this case set the i'th position reg of reload/copy node to the reg allocated // for copy/reload refPosition. Essentially a copy/reload node will have a reg // for each multi-reg position of its child. If there is a valid reg in i'th // position of GT_COPY or GT_RELOAD node then the corresponding result of its // child needs to be copied or reloaded to that reg. if (parent->IsCopyOrReload()) { noway_assert(parent->OperGet() == oper); noway_assert(tree->IsMultiRegNode()); GenTreeCopyOrReload* copyOrReload = parent->AsCopyOrReload(); noway_assert(copyOrReload->GetRegNumByIdx(multiRegIdx) == REG_NA); copyOrReload->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx); } else { var_types regType = tree->TypeGet(); if ((regType == TYP_STRUCT) && !tree->IsMultiRegNode()) { assert(compiler->compEnregStructLocals()); assert(tree->IsLocal()); const GenTreeLclVarCommon* lcl = tree->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); // We create struct copies with a primitive type so we don't bother copy node with parsing structHndl. 
// Note that for a multiReg node we keep each regType in the tree and don't need this. regType = varDsc->GetRegisterType(lcl); assert(regType != TYP_UNDEF); } // Create the new node, with "tree" as its only child. GenTreeCopyOrReload* newNode = new (compiler, oper) GenTreeCopyOrReload(oper, regType, tree); assert(refPosition->registerAssignment != RBM_NONE); SetLsraAdded(newNode); newNode->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx); if (refPosition->copyReg) { // This is a TEMPORARY copy assert(isCandidateLocalRef(tree) || tree->IsMultiRegLclVar()); newNode->SetLastUse(multiRegIdx); } // Insert the copy/reload after the spilled node and replace the use of the original node with a use // of the copy/reload. blockRange.InsertAfter(tree, newNode); treeUse.ReplaceWith(newNode); } } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE //------------------------------------------------------------------------ // insertUpperVectorSave: Insert code to save the upper half of a vector that lives // in a callee-save register at the point of a kill (the upper half is // not preserved). // // Arguments: // tree - This is the node before which we will insert the Save. // It will be a call or some node that turns into a call. // refPosition - The RefTypeUpperVectorSave RefPosition. // upperVectorInterval - The Interval for the upper half of the large vector lclVar. // block - the BasicBlock containing the call. // void LinearScan::insertUpperVectorSave(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block) { JITDUMP("Inserting UpperVectorSave for RP #%d before %d.%s:\n", refPosition->rpNum, tree->gtTreeID, GenTree::OpName(tree->gtOper)); Interval* lclVarInterval = upperVectorInterval->relatedInterval; assert(lclVarInterval->isLocalVar == true); assert(refPosition->getInterval() == upperVectorInterval); regNumber lclVarReg = lclVarInterval->physReg; if (lclVarReg == REG_NA) { return; } #ifdef DEBUG if (tree->IsCall()) { // Make sure that we do not insert vector save before calls that do not return. assert(!tree->AsCall()->IsNoReturn()); } #endif LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum); assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())); // On Arm64, we must always have a register to save the upper half, // while on x86 we can spill directly to memory. regNumber spillReg = refPosition->assignedReg(); #ifdef TARGET_ARM64 bool spillToMem = refPosition->spillAfter; assert(spillReg != REG_NA); #else bool spillToMem = (spillReg == REG_NA); assert(!refPosition->spillAfter); #endif LIR::Range& blockRange = LIR::AsRange(block); // Insert the save before the call. GenTree* saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType); saveLcl->SetRegNum(lclVarReg); SetLsraAdded(saveLcl); GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(LargeVectorSaveType, saveLcl, SIMDIntrinsicUpperSave, varDsc->GetSimdBaseJitType(), genTypeSize(varDsc)); if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF) { // There are a few scenarios where we can get a LCL_VAR which // doesn't know the underlying baseType. In that scenario, we // will just lie and say it is a float. Codegen doesn't actually // care what the type is but this avoids an assert that would // otherwise be fired from the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); } SetLsraAdded(simdNode); simdNode->SetRegNum(spillReg); if (spillToMem) { simdNode->gtFlags |= GTF_SPILL; upperVectorInterval->physReg = REG_NA; } else { assert((genRegMask(spillReg) & RBM_FLT_CALLEE_SAVED) != RBM_NONE); upperVectorInterval->physReg = spillReg; } blockRange.InsertBefore(tree, LIR::SeqTree(compiler, simdNode)); DISPTREE(simdNode); JITDUMP("\n"); } //------------------------------------------------------------------------ // insertUpperVectorRestore: Insert code to restore the upper half of a vector that has been partially spilled. // // Arguments: // tree - This is the node for which we will insert the Restore. // If non-null, it will be a use of the large vector lclVar. // If null, the Restore will be added to the end of the block. // upperVectorInterval - The Interval for the upper vector for the lclVar. // block - the BasicBlock into which we will be inserting the code. // // Notes: // In the case where 'tree' is non-null, we will insert the restore just prior to // its use, in order to ensure the proper ordering. // void LinearScan::insertUpperVectorRestore(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block) { JITDUMP("Inserting UpperVectorRestore for RP #%d ", refPosition->rpNum); Interval* lclVarInterval = upperVectorInterval->relatedInterval; assert(lclVarInterval->isLocalVar == true); regNumber lclVarReg = lclVarInterval->physReg; // We should not call this method if the lclVar is not in a register (we should have simply marked the entire // lclVar as spilled). assert(lclVarReg != REG_NA); LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum); assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())); GenTree* restoreLcl = nullptr; restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType); restoreLcl->SetRegNum(lclVarReg); SetLsraAdded(restoreLcl); GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(varDsc->TypeGet(), restoreLcl, SIMDIntrinsicUpperRestore, varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType)); if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF) { // There are a few scenarios where we can get a LCL_VAR which // doesn't know the underlying baseType. In that scenario, we // will just lie and say it is a float. Codegen doesn't actually // care what the type is but this avoids an assert that would // otherwise be fired from the more general checks that happen. simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); } regNumber restoreReg = upperVectorInterval->physReg; SetLsraAdded(simdNode); if (restoreReg == REG_NA) { // We need a stack location for this. assert(lclVarInterval->isSpilled); #ifdef TARGET_AMD64 assert(refPosition->assignedReg() == REG_NA); simdNode->gtFlags |= GTF_NOREG_AT_USE; #else simdNode->gtFlags |= GTF_SPILLED; assert(refPosition->assignedReg() != REG_NA); restoreReg = refPosition->assignedReg(); #endif } simdNode->SetRegNum(restoreReg); LIR::Range& blockRange = LIR::AsRange(block); if (tree != nullptr) { JITDUMP("before %d.%s:\n", tree->gtTreeID, GenTree::OpName(tree->gtOper)); LIR::Use treeUse; bool foundUse = blockRange.TryGetUse(tree, &treeUse); assert(foundUse); // We need to insert the restore prior to the use, not (necessarily) immediately after the lclVar. 
blockRange.InsertBefore(treeUse.User(), LIR::SeqTree(compiler, simdNode)); } else { JITDUMP("at end of " FMT_BB ":\n", block->bbNum); if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = blockRange.LastNode(); assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, LIR::SeqTree(compiler, simdNode)); } else { assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); blockRange.InsertAtEnd(LIR::SeqTree(compiler, simdNode)); } } DISPTREE(simdNode); JITDUMP("\n"); } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE //------------------------------------------------------------------------ // initMaxSpill: Initializes the LinearScan members used to track the max number // of concurrent spills. This is needed so that we can set the // fields in Compiler, so that the code generator, in turn can // allocate the right number of spill locations. // // Arguments: // None. // // Return Value: // None. // // Assumptions: // This is called before any calls to updateMaxSpill(). void LinearScan::initMaxSpill() { needDoubleTmpForFPCall = false; needFloatTmpForFPCall = false; for (int i = 0; i < TYP_COUNT; i++) { maxSpill[i] = 0; currentSpill[i] = 0; } } //------------------------------------------------------------------------ // recordMaxSpill: Sets the fields in Compiler for the max number of concurrent spills. // (See the comment on initMaxSpill.) // // Arguments: // None. // // Return Value: // None. // // Assumptions: // This is called after updateMaxSpill() has been called for all "real" // RefPositions. void LinearScan::recordMaxSpill() { // Note: due to the temp normalization process (see tmpNormalizeType) // only a few types should actually be seen here. JITDUMP("Recording the maximum number of concurrent spills:\n"); #ifdef TARGET_X86 var_types returnType = RegSet::tmpNormalizeType(compiler->info.compRetType); if (needDoubleTmpForFPCall || (returnType == TYP_DOUBLE)) { JITDUMP("Adding a spill temp for moving a double call/return value between xmm reg and x87 stack.\n"); maxSpill[TYP_DOUBLE] += 1; } if (needFloatTmpForFPCall || (returnType == TYP_FLOAT)) { JITDUMP("Adding a spill temp for moving a float call/return value between xmm reg and x87 stack.\n"); maxSpill[TYP_FLOAT] += 1; } #endif // TARGET_X86 compiler->codeGen->regSet.tmpBeginPreAllocateTemps(); for (int i = 0; i < TYP_COUNT; i++) { if (var_types(i) != RegSet::tmpNormalizeType(var_types(i))) { // Only normalized types should have anything in the maxSpill array. // We assume here that if type 'i' does not normalize to itself, then // nothing else normalizes to 'i', either. assert(maxSpill[i] == 0); } if (maxSpill[i] != 0) { JITDUMP(" %s: %d\n", varTypeName(var_types(i)), maxSpill[i]); compiler->codeGen->regSet.tmpPreAllocateTemps(var_types(i), maxSpill[i]); } } JITDUMP("\n"); } //------------------------------------------------------------------------ // updateMaxSpill: Update the maximum number of concurrent spills // // Arguments: // refPosition - the current RefPosition being handled // // Return Value: // None. // // Assumptions: // The RefPosition has an associated interval (getInterval() will // otherwise assert). // // Notes: // This is called for each "real" RefPosition during the writeback // phase of LSRA. It keeps track of how many concurrently-live // spills there are, and the largest number seen so far. 
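// For example (a hypothetical walk-through, not from a real trace): if three TYP_INT tree temps are spilled before any of them is reloaded, currentSpill[TYP_INT] reaches 3 and maxSpill[TYP_INT] is raised to 3; each reload then decrements currentSpill[TYP_INT], and recordMaxSpill() later pre-allocates three int-sized spill temps via tmpPreAllocateTemps.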
void LinearScan::updateMaxSpill(RefPosition* refPosition) { RefType refType = refPosition->refType; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if ((refType == RefTypeUpperVectorSave) || (refType == RefTypeUpperVectorRestore)) { Interval* interval = refPosition->getInterval(); // If this is not an 'upperVector', it must be a tree temp that has already been // (fully) spilled. if (!interval->isUpperVector) { assert(interval->firstRefPosition->spillAfter); } else { // The UpperVector RefPositions spill to the localVar's home location. Interval* lclVarInterval = interval->relatedInterval; assert(lclVarInterval->isSpilled || (!refPosition->spillAfter && !refPosition->reload)); } return; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (refPosition->spillAfter || refPosition->reload || (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA)) { Interval* interval = refPosition->getInterval(); if (!interval->isLocalVar) { GenTree* treeNode = refPosition->treeNode; if (treeNode == nullptr) { assert(RefTypeIsUse(refType)); treeNode = interval->firstRefPosition->treeNode; } assert(treeNode != nullptr); // The tmp allocation logic 'normalizes' types to a small number of // types that need distinct stack locations from each other. // Those types are currently gc refs, byrefs, <= 4 byte non-GC items, // 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors. // LSRA is agnostic to those choices but needs // to know what they are here. var_types type; if (!treeNode->IsMultiRegNode()) { type = getDefType(treeNode); } else { type = treeNode->GetRegTypeByIndex(refPosition->getMultiRegIdx()); } type = RegSet::tmpNormalizeType(type); if (refPosition->spillAfter && !refPosition->reload) { currentSpill[type]++; if (currentSpill[type] > maxSpill[type]) { maxSpill[type] = currentSpill[type]; } } else if (refPosition->reload) { assert(currentSpill[type] > 0); currentSpill[type]--; } else if (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA) { // A spill temp is not getting reloaded into a reg because it is // marked as allocate-if-profitable and is being used from its // memory location. To properly account for max spill of this type, we // decrement the spill count. assert(RefTypeIsUse(refType)); assert(currentSpill[type] > 0); currentSpill[type]--; } JITDUMP(" Max spill for %s is %d\n", varTypeName(type), maxSpill[type]); } } } // This is the final phase of register allocation. It writes the register assignments to // the tree, and performs resolution across joins and backedges. // void LinearScan::resolveRegisters() { // Iterate over the tree and the RefPositions in lockstep // - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs) // on the tree node // - track globally-live var locations // - add resolution points at split/merge/critical points as needed // Need to use the same traversal order as the one that assigns the location numbers. // Dummy RefPositions have been added at any split, join or critical edge, at the // point where resolution may be required. These are located: // - for a split, at the top of the non-adjacent block // - for a join, at the bottom of the non-adjacent joining block // - for a critical edge, at the top of the target block of each critical // edge. // Note that a target block may have multiple incoming critical or split edges // // These RefPositions record the expected location of the Interval at that point. // At each branch, we identify the location of each liveOut interval, and check // against the RefPositions at the target.
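// For example (illustrative only; the block numbers and registers are made up): if V02 is live across a critical edge BB03 -> BB07, a dummy RefPosition at the top of BB07 records where V02 is expected on entry; if BB03's outVarToRegMap has V02 in REG_RSI but BB07 expects REG_RDI, resolution will insert a move, possibly on a split of the edge.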
BasicBlock* block; LsraLocation currentLocation = MinLocation; // Clear register assignments - these will be reestablished as lclVar defs (including RefTypeParamDefs) // are encountered. if (enregisterLocalVars) { for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); Interval* assignedInterval = physRegRecord->assignedInterval; if (assignedInterval != nullptr) { assignedInterval->assignedReg = nullptr; assignedInterval->physReg = REG_NA; } physRegRecord->assignedInterval = nullptr; physRegRecord->recentRefPosition = nullptr; } // Clear "recentRefPosition" for lclVar intervals for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { if (localVarIntervals[varIndex] != nullptr) { localVarIntervals[varIndex]->recentRefPosition = nullptr; localVarIntervals[varIndex]->isActive = false; } else { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); } } } // handle incoming arguments and special temps RefPositionIterator refPosIterator = refPositions.begin(); RefPosition* currentRefPosition = &refPosIterator; if (enregisterLocalVars) { VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum]; for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = currentRefPosition->getInterval(); assert(interval != nullptr && interval->isLocalVar); resolveLocalRef(nullptr, nullptr, currentRefPosition); regNumber reg = REG_STK; int varIndex = interval->getVarIndex(compiler); if (!currentRefPosition->spillAfter && currentRefPosition->registerAssignment != RBM_NONE) { reg = currentRefPosition->assignedReg(); } else { reg = REG_STK; interval->isActive = false; } setVarReg(entryVarToRegMap, varIndex, reg); } } else { assert(refPosIterator == refPositions.end() || (refPosIterator->refType != RefTypeParamDef && refPosIterator->refType != RefTypeZeroInit)); } // write back assignments for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock()) { assert(curBBNum == block->bbNum); if (enregisterLocalVars) { // Record the var locations at the start of this block. // (If it's fgFirstBB, we've already done that above, see entryVarToRegMap) curBBStartLocation = currentRefPosition->nodeLocation; if (block != compiler->fgFirstBB) { processBlockStartLocations(block); } // Handle the DummyDefs, updating the incoming var location. for (; refPosIterator != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef; ++refPosIterator, currentRefPosition = &refPosIterator) { assert(currentRefPosition->isIntervalRef()); // Don't mark dummy defs as reload currentRefPosition->reload = false; resolveLocalRef(nullptr, nullptr, currentRefPosition); regNumber reg; if (currentRefPosition->registerAssignment != RBM_NONE) { reg = currentRefPosition->assignedReg(); } else { reg = REG_STK; currentRefPosition->getInterval()->isActive = false; } setInVarRegForBB(curBBNum, currentRefPosition->getInterval()->varNum, reg); } } // The next RefPosition should be for the block. Move past it. 
assert(refPosIterator != refPositions.end()); assert(currentRefPosition->refType == RefTypeBB); ++refPosIterator; currentRefPosition = &refPosIterator; // Handle the RefPositions for the block for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB && currentRefPosition->refType != RefTypeDummyDef; ++refPosIterator, currentRefPosition = &refPosIterator) { currentLocation = currentRefPosition->nodeLocation; // Ensure that the spill & copy info is valid. // First, if it's reload, it must not be copyReg or moveReg assert(!currentRefPosition->reload || (!currentRefPosition->copyReg && !currentRefPosition->moveReg)); // If it's copyReg it must not be moveReg, and vice-versa assert(!currentRefPosition->copyReg || !currentRefPosition->moveReg); switch (currentRefPosition->refType) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUpperVectorSave: case RefTypeUpperVectorRestore: #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUse: case RefTypeDef: // These are the ones we're interested in break; case RefTypeKill: case RefTypeFixedReg: // These require no handling at resolution time assert(currentRefPosition->referent != nullptr); currentRefPosition->referent->recentRefPosition = currentRefPosition; continue; case RefTypeExpUse: // Ignore the ExpUse cases - a RefTypeExpUse would only exist if the // variable is dead at the entry to the next block. So we'll mark // it as in its current location and resolution will take care of any // mismatch. assert(getNextBlock() == nullptr || !VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn, currentRefPosition->getInterval()->getVarIndex(compiler))); currentRefPosition->referent->recentRefPosition = currentRefPosition; continue; case RefTypeKillGCRefs: // No action to take at resolution time, and no interval to update recentRefPosition for. continue; case RefTypeDummyDef: case RefTypeParamDef: case RefTypeZeroInit: // Should have handled all of these already default: unreached(); break; } updateMaxSpill(currentRefPosition); GenTree* treeNode = currentRefPosition->treeNode; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (currentRefPosition->refType == RefTypeUpperVectorSave) { // The treeNode is a call or something that might become one. noway_assert(treeNode != nullptr); // If the associated interval is an UpperVector, this must be a RefPosition for a LargeVectorType // LocalVar. // Otherwise, this is a non-lclVar interval that has been spilled, and we don't need to do anything. Interval* interval = currentRefPosition->getInterval(); if (interval->isUpperVector) { Interval* localVarInterval = interval->relatedInterval; if ((localVarInterval->physReg != REG_NA) && !localVarInterval->isPartiallySpilled) { if (!currentRefPosition->skipSaveRestore) { // If the localVar is in a register, it must be in a register that is not trashed by // the current node (otherwise it would have already been spilled). assert((genRegMask(localVarInterval->physReg) & getKillSetForNode(treeNode)) == RBM_NONE); // If we have allocated a register to spill it to, we will use that; otherwise, we will // spill it to the stack. We can use as a temp register any non-arg caller-save register. currentRefPosition->referent->recentRefPosition = currentRefPosition; insertUpperVectorSave(treeNode, currentRefPosition, currentRefPosition->getInterval(), block); } localVarInterval->isPartiallySpilled = true; } } else { // This is a non-lclVar interval that must have been spilled. 
assert(!currentRefPosition->getInterval()->isLocalVar); assert(currentRefPosition->getInterval()->firstRefPosition->spillAfter); } continue; } else if (currentRefPosition->refType == RefTypeUpperVectorRestore) { // Since we don't do partial restores of tree temp intervals, this must be an upperVector. Interval* interval = currentRefPosition->getInterval(); Interval* localVarInterval = interval->relatedInterval; assert(interval->isUpperVector && (localVarInterval != nullptr)); if (localVarInterval->physReg != REG_NA) { assert(localVarInterval->isPartiallySpilled); assert((localVarInterval->assignedReg != nullptr) && (localVarInterval->assignedReg->regNum == localVarInterval->physReg) && (localVarInterval->assignedReg->assignedInterval == localVarInterval)); if (!currentRefPosition->skipSaveRestore) { insertUpperVectorRestore(treeNode, currentRefPosition, interval, block); } } localVarInterval->isPartiallySpilled = false; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Most uses won't actually need to be recorded (they're on the def). // In those cases, treeNode will be nullptr. if (treeNode == nullptr) { // This is either a use, a dead def, or a field of a struct Interval* interval = currentRefPosition->getInterval(); assert(currentRefPosition->refType == RefTypeUse || currentRefPosition->registerAssignment == RBM_NONE || interval->isStructField || interval->IsUpperVector()); // TODO-Review: Need to handle the case where any of the struct fields // are reloaded/spilled at this use assert(!interval->isStructField || (currentRefPosition->reload == false && currentRefPosition->spillAfter == false)); if (interval->isLocalVar && !interval->isStructField) { LclVarDsc* varDsc = interval->getLocalVar(compiler); // This must be a dead definition. We need to mark the lclVar // so that it's not considered a candidate for lvRegister, as // this dead def will have to go to the stack. assert(currentRefPosition->refType == RefTypeDef); varDsc->SetRegNum(REG_STK); } continue; } assert(currentRefPosition->isIntervalRef()); if (currentRefPosition->getInterval()->isInternal) { treeNode->gtRsvdRegs |= currentRefPosition->registerAssignment; } else { writeRegisters(currentRefPosition, treeNode); if (treeNode->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR) && currentRefPosition->getInterval()->isLocalVar) { resolveLocalRef(block, treeNode->AsLclVar(), currentRefPosition); } // Mark spill locations on temps // (local vars are handled in resolveLocalRef, above) // Note that the tree node will be changed from GTF_SPILL to GTF_SPILLED // in codegen, taking care of the "reload" case for temps else if (currentRefPosition->spillAfter || (currentRefPosition->nextRefPosition != nullptr && currentRefPosition->nextRefPosition->moveReg)) { if (treeNode != nullptr) { if (currentRefPosition->spillAfter) { treeNode->gtFlags |= GTF_SPILL; // If this is a constant interval that is reusing a pre-existing value, we actually need // to generate the value at this point in order to spill it. if (treeNode->IsReuseRegVal()) { treeNode->ResetReuseRegVal(); } // In the case of a multi-reg node, also set the spill flag on the // register specified by the multi-reg index of the current RefPosition. // Note that the spill flag on treeNode indicates that one or // more of its allocated registers are in that state.
if (treeNode->IsMultiRegCall()) { GenTreeCall* call = treeNode->AsCall(); call->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #if FEATURE_ARG_SPLIT else if (treeNode->OperIsPutArgSplit()) { GenTreePutArgSplit* splitArg = treeNode->AsPutArgSplit(); splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #ifdef TARGET_ARM else if (compFeatureArgSplit() && treeNode->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp(); multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #endif // TARGET_ARM #endif // FEATURE_ARG_SPLIT } // If the value is reloaded or moved to a different register, we need to insert // a node to hold the register to which it should be reloaded RefPosition* nextRefPosition = currentRefPosition->nextRefPosition; noway_assert(nextRefPosition != nullptr); if (INDEBUG(alwaysInsertReload() ||) nextRefPosition->assignedReg() != currentRefPosition->assignedReg()) { #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Note that we asserted above that this is an Interval RefPosition. Interval* currentInterval = currentRefPosition->getInterval(); if (!currentInterval->isUpperVector && nextRefPosition->refType == RefTypeUpperVectorSave) { // The currentRefPosition is a spill of a tree temp. // These have no associated Restore, as we always spill if the vector is // in a register when this is encountered. // The nextRefPosition we're interested in (where we may need to insert a // reload or flag as GTF_NOREG_AT_USE) is the subsequent RefPosition. assert(!currentInterval->isLocalVar); nextRefPosition = nextRefPosition->nextRefPosition; assert(nextRefPosition->refType != RefTypeUpperVectorSave); } // UpperVector intervals may have unique assignments at each reference. if (!currentInterval->isUpperVector) #endif { if (nextRefPosition->assignedReg() != REG_NA) { insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), nextRefPosition); } else { assert(nextRefPosition->RegOptional()); // In case of tree temps, if def is spilled and use didn't // get a register, set a flag on tree node to be treated as // contained at the point of its use. if (currentRefPosition->spillAfter && currentRefPosition->refType == RefTypeDef && nextRefPosition->refType == RefTypeUse) { assert(nextRefPosition->treeNode == nullptr); treeNode->gtFlags |= GTF_NOREG_AT_USE; } } } } } // We should never have to "spill after" a temp use, since // they're single use else { unreached(); } } } } if (enregisterLocalVars) { processBlockEndLocations(block); } } if (enregisterLocalVars) { #ifdef DEBUG if (VERBOSE) { printf("-----------------------\n"); printf("RESOLVING BB BOUNDARIES\n"); printf("-----------------------\n"); printf("Resolution Candidates: "); dumpConvertedVarSet(compiler, resolutionCandidateVars); printf("\n"); printf("Has %sCritical Edges\n\n", hasCriticalEdges ? 
"" : "No "); printf("Prior to Resolution\n"); for (BasicBlock* const block : compiler->Blocks()) { printf("\n" FMT_BB, block->bbNum); if (block->hasEHBoundaryIn()) { JITDUMP(" EH flow in"); } if (block->hasEHBoundaryOut()) { JITDUMP(" EH flow out"); } printf("\nuse def in out\n"); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n"); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n"); dumpInVarToRegMap(block); dumpOutVarToRegMap(block); } printf("\n\n"); } #endif // DEBUG resolveEdges(); // Verify register assignments on variables unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++) { if (!isCandidateVar(varDsc)) { varDsc->SetRegNum(REG_STK); } else { Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); // Determine initial position for parameters if (varDsc->lvIsParam) { regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment; regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter) ? REG_STK : genRegNumFromMask(initialRegMask); #ifdef TARGET_ARM if (varTypeIsMultiReg(varDsc)) { // TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and GetOtherReg() (these should NYI // before this) assert(!"Multi-reg types not yet supported"); } else #endif // TARGET_ARM { varDsc->SetArgInitReg(initialReg); JITDUMP(" Set V%02u argument initial register to %s\n", lclNum, getRegName(initialReg)); } // Stack args that are part of dependently-promoted structs should never be register candidates (see // LinearScan::isRegCandidate). assert(varDsc->lvIsRegArg || !compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc)); } // If lvRegNum is REG_STK, that means that either no register // was assigned, or (more likely) that the same register was not // used for all references. In that case, codegen gets the register // from the tree node. if (varDsc->GetRegNum() == REG_STK || interval->isSpilled || interval->isSplit) { // For codegen purposes, we'll set lvRegNum to whatever register // it's currently in as we go. // However, we never mark an interval as lvRegister if it has either been spilled // or split. varDsc->lvRegister = false; // Skip any dead defs or exposed uses // (first use exposed will only occur when there is no explicit initialization) RefPosition* firstRefPosition = interval->firstRefPosition; while ((firstRefPosition != nullptr) && (firstRefPosition->refType == RefTypeExpUse)) { firstRefPosition = firstRefPosition->nextRefPosition; } if (firstRefPosition == nullptr) { // Dead interval varDsc->lvLRACandidate = false; if (varDsc->lvRefCnt() == 0) { varDsc->lvOnFrame = false; } else { // We may encounter cases where a lclVar actually has no references, but // a non-zero refCnt. For safety (in case this is some "hidden" lclVar that we're // not correctly recognizing), we'll mark those as needing a stack location. // TODO-Cleanup: Make this an assert if/when we correct the refCnt // updating. varDsc->lvOnFrame = true; } } else { // If the interval was not spilled, it doesn't need a stack location. 
if (!interval->isSpilled) { varDsc->lvOnFrame = false; } if (firstRefPosition->registerAssignment == RBM_NONE || firstRefPosition->spillAfter) { // Either this RefPosition is spilled, or regOptional or it is not a "real" def or use assert( firstRefPosition->spillAfter || firstRefPosition->RegOptional() || (firstRefPosition->refType != RefTypeDef && firstRefPosition->refType != RefTypeUse)); varDsc->SetRegNum(REG_STK); } else { varDsc->SetRegNum(firstRefPosition->assignedReg()); } } } else { { varDsc->lvRegister = true; varDsc->lvOnFrame = false; } #ifdef DEBUG regMaskTP registerAssignment = genRegMask(varDsc->GetRegNum()); assert(!interval->isSpilled && !interval->isSplit); RefPosition* refPosition = interval->firstRefPosition; assert(refPosition != nullptr); while (refPosition != nullptr) { // All RefPositions must match, except for dead definitions, // copyReg/moveReg and RefTypeExpUse positions if (refPosition->registerAssignment != RBM_NONE && !refPosition->copyReg && !refPosition->moveReg && refPosition->refType != RefTypeExpUse) { assert(refPosition->registerAssignment == registerAssignment); } refPosition = refPosition->nextRefPosition; } #endif // DEBUG } } } } #ifdef DEBUG if (VERBOSE) { printf("Trees after linear scan register allocator (LSRA)\n"); compiler->fgDispBasicBlocks(true); } verifyFinalAllocation(); #endif // DEBUG compiler->raMarkStkVars(); recordMaxSpill(); // TODO-CQ: Review this comment and address as needed. // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT) // so that the gc tracking logic and lvMustInit logic will ignore them. // Extract the code that does this from raAssignVars, and call it here. // PRECONDITIONS: Ensure that lvPromoted is set on promoted structs, if and // only if it is promoted on all paths. // Call might be something like: // compiler->BashUnusedStructLocals(); } // //------------------------------------------------------------------------ // insertMove: Insert a move of a lclVar with the given lclNum into the given block. // // Arguments: // block - the BasicBlock into which the move will be inserted. // insertionPoint - the instruction before which to insert the move // lclNum - the lclNum of the var to be moved // fromReg - the register from which the var is moving // toReg - the register to which the var is moving // // Return Value: // None. // // Notes: // If insertionPoint is non-NULL, insert before that instruction; // otherwise, insert "near" the end (prior to the branch, if any). // If fromReg or toReg is REG_STK, then move from/to memory, respectively. void LinearScan::insertMove( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); // the lclVar must be a register candidate assert(isRegCandidate(varDsc)); // One or both MUST be a register assert(fromReg != REG_STK || toReg != REG_STK); // They must not be the same register. assert(fromReg != toReg); // This var can't be marked lvRegister now varDsc->SetRegNum(REG_STK); GenTree* src = compiler->gtNewLclvNode(lclNum, varDsc->TypeGet()); SetLsraAdded(src); // There are three cases we need to handle: // - We are loading a lclVar from the stack. // - We are storing a lclVar to the stack. // - We are copying a lclVar between registers. // // In the first and second cases, the lclVar node will be marked with GTF_SPILLED and GTF_SPILL, respectively. 
// It is up to the code generator to ensure that any necessary normalization is done when loading or storing the // lclVar's value. // // In the third case, we generate GT_COPY(GT_LCL_VAR) and type each node with the normalized type of the lclVar. // This is safe because a lclVar is always normalized once it is in a register. GenTree* dst = src; if (fromReg == REG_STK) { src->gtFlags |= GTF_SPILLED; src->SetRegNum(toReg); } else if (toReg == REG_STK) { src->gtFlags |= GTF_SPILL; src->SetRegNum(fromReg); } else { var_types movType = varDsc->GetRegisterType(); src->gtType = movType; dst = new (compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, movType, src); // This is the new home of the lclVar - indicate that by clearing the GTF_VAR_DEATH flag. // Note that if src is itself a lastUse, this will have no effect. dst->gtFlags &= ~(GTF_VAR_DEATH); src->SetRegNum(fromReg); dst->SetRegNum(toReg); SetLsraAdded(dst); } dst->SetUnusedValue(); LIR::Range treeRange = LIR::SeqTree(compiler, dst); LIR::Range& blockRange = LIR::AsRange(block); if (insertionPoint != nullptr) { blockRange.InsertBefore(insertionPoint, std::move(treeRange)); } else { // Put the copy at the bottom GenTree* lastNode = blockRange.LastNode(); if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = lastNode; assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, std::move(treeRange)); } else { // These block kinds don't have a branch at the end. assert((lastNode == nullptr) || (!lastNode->OperIsConditionalJump() && !lastNode->OperIs(GT_SWITCH_TABLE, GT_SWITCH, GT_RETURN, GT_RETFILT))); blockRange.InsertAtEnd(std::move(treeRange)); } } } void LinearScan::insertSwap( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2) { #ifdef DEBUG if (VERBOSE) { const char* insertionPointString = "top"; if (insertionPoint == nullptr) { insertionPointString = "bottom"; } printf(" " FMT_BB " %s: swap V%02u in %s with V%02u in %s\n", block->bbNum, insertionPointString, lclNum1, getRegName(reg1), lclNum2, getRegName(reg2)); } #endif // DEBUG LclVarDsc* varDsc1 = compiler->lvaGetDesc(lclNum1); LclVarDsc* varDsc2 = compiler->lvaGetDesc(lclNum2); assert(reg1 != REG_STK && reg1 != REG_NA && reg2 != REG_STK && reg2 != REG_NA); GenTree* lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet()); lcl1->SetRegNum(reg1); SetLsraAdded(lcl1); GenTree* lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet()); lcl2->SetRegNum(reg2); SetLsraAdded(lcl2); GenTree* swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2); swap->SetRegNum(REG_NA); SetLsraAdded(swap); lcl1->gtNext = lcl2; lcl2->gtPrev = lcl1; lcl2->gtNext = swap; swap->gtPrev = lcl2; LIR::Range swapRange = LIR::SeqTree(compiler, swap); LIR::Range& blockRange = LIR::AsRange(block); if (insertionPoint != nullptr) { blockRange.InsertBefore(insertionPoint, std::move(swapRange)); } else { // Put the copy at the bottom // If there's a branch, make an embedded statement that executes just prior to the branch if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { noway_assert(!blockRange.IsEmpty()); GenTree* branch = blockRange.LastNode(); assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH); blockRange.InsertBefore(branch, std::move(swapRange)); } else { assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); blockRange.InsertAtEnd(std::move(swapRange)); } } } 
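// For example (a hypothetical resolution move; the lclNum and register are made up): insertMove(block, nullptr, /* lclNum */ 2, REG_RAX, REG_STK) appends a GT_LCL_VAR for V02 near the bottom of 'block' (before any terminating branch), marked GTF_SPILL with GetRegNum() == REG_RAX, so codegen stores REG_RAX to V02's stack home.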
//------------------------------------------------------------------------ // getTempRegForResolution: Get a free register to use for resolution code. // // Arguments: // fromBlock - The "from" block on the edge being resolved. // toBlock - The "to" block on the edge // type - the type of register required // // Return Value: // Returns a register that is free on the given edge, or REG_NA if none is available. // // Notes: // It is up to the caller to check the return value, and to determine whether a register is // available, and to handle that case appropriately. // It is also up to the caller to cache the return value, as this is not cheap to compute. regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type) { // TODO-Throughput: This would be much more efficient if we add RegToVarMaps instead of VarToRegMaps // and they would be more space-efficient as well. VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum); VarToRegMap toVarToRegMap = getInVarToRegMap(toBlock->bbNum); #ifdef TARGET_ARM regMaskTP freeRegs; if (type == TYP_DOUBLE) { // We have to consider all float registers for TYP_DOUBLE freeRegs = allRegs(TYP_FLOAT); } else { freeRegs = allRegs(type); } #else // !TARGET_ARM regMaskTP freeRegs = allRegs(type); #endif // !TARGET_ARM #ifdef DEBUG if (getStressLimitRegs() == LSRA_LIMIT_SMALL_SET) { return REG_NA; } #endif // DEBUG INDEBUG(freeRegs = stressLimitRegs(nullptr, freeRegs)); // We are only interested in the variables that are live-in to the "to" block. VarSetOps::Iter iter(compiler, toBlock->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex) && freeRegs != RBM_NONE) { regNumber fromReg = getVarReg(fromVarToRegMap, varIndex); regNumber toReg = getVarReg(toVarToRegMap, varIndex); assert(fromReg != REG_NA && toReg != REG_NA); if (fromReg != REG_STK) { freeRegs &= ~genRegMask(fromReg, getIntervalForLocalVar(varIndex)->registerType); } if (toReg != REG_STK) { freeRegs &= ~genRegMask(toReg, getIntervalForLocalVar(varIndex)->registerType); } } #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Exclude any doubles for which the odd half isn't in freeRegs. freeRegs = freeRegs & ((freeRegs << 1) & RBM_ALLDOUBLE); } #endif if (freeRegs == RBM_NONE) { return REG_NA; } else { regNumber tempReg = genRegNumFromMask(genFindLowestBit(freeRegs)); return tempReg; } } #ifdef TARGET_ARM //------------------------------------------------------------------------ // addResolutionForDouble: Add resolution move(s) for TYP_DOUBLE interval // and update location. // // Arguments: // block - the BasicBlock into which the move will be inserted. // insertionPoint - the instruction before which to insert the move // sourceIntervals - maintains sourceIntervals[reg], the interval with which each 'reg' is associated // location - maintains location[reg] which is the location of the var that was originally in 'reg'. // toReg - the register to which the var is moving // fromReg - the register from which the var is moving // resolveType - the type of resolution to be performed // // Return Value: // None. // // Notes: // It inserts at least one move and updates incoming parameter 'location'.
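// For example (an illustrative ARM32 case; the registers are hypothetical): if sourceIntervals[f4] holds a TYP_DOUBLE interval that must move to d1 (f2/f3), a single double move is added and location[f4] becomes f2; if instead f4 and f5 each hold a TYP_FLOAT interval, two float moves are added (f4 to f2, and f5 to f3) and both location entries are updated.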
// void LinearScan::addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, regNumberSmall* location, regNumber toReg, regNumber fromReg, ResolveType resolveType) { regNumber secondHalfTargetReg = REG_NEXT(fromReg); Interval* intervalToBeMoved1 = sourceIntervals[fromReg]; Interval* intervalToBeMoved2 = sourceIntervals[secondHalfTargetReg]; assert(!(intervalToBeMoved1 == nullptr && intervalToBeMoved2 == nullptr)); if (intervalToBeMoved1 != nullptr) { if (intervalToBeMoved1->registerType == TYP_DOUBLE) { // TYP_DOUBLE interval occupies a double register, i.e. two float registers. assert(intervalToBeMoved2 == nullptr); assert(genIsValidDoubleReg(toReg)); } else { // TYP_FLOAT interval occupies 1st half of double register, i.e. 1st float register assert(genIsValidFloatReg(toReg)); } addResolution(block, insertionPoint, intervalToBeMoved1, toReg, fromReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[fromReg] = (regNumberSmall)toReg; } if (intervalToBeMoved2 != nullptr) { // TYP_FLOAT interval occupies 2nd half of double register. assert(intervalToBeMoved2->registerType == TYP_FLOAT); regNumber secondHalfTempReg = REG_NEXT(toReg); addResolution(block, insertionPoint, intervalToBeMoved2, secondHalfTempReg, secondHalfTargetReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); location[secondHalfTargetReg] = (regNumberSmall)secondHalfTempReg; } return; } #endif // TARGET_ARM //------------------------------------------------------------------------ // addResolution: Add a resolution move of the given interval // // Arguments: // block - the BasicBlock into which the move will be inserted. // insertionPoint - the instruction before which to insert the move // interval - the interval of the var to be moved // toReg - the register to which the var is moving // fromReg - the register from which the var is moving // // Return Value: // None. // // Notes: // For joins, we insert at the bottom (indicated by an insertionPoint // of nullptr), while for splits we insert at the top. // This is because for joins 'block' is a pred of the join, while for splits it is a succ. // For critical edges, this function may be called twice - once to move from // the source (fromReg), if any, to the stack, in which case toReg will be // REG_STK, and we insert at the bottom (leave insertionPoint as nullptr). // The next time, we want to move from the stack to the destination (toReg), // in which case fromReg will be REG_STK, and we insert at the top. void LinearScan::addResolution( BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber toReg, regNumber fromReg) { #ifdef DEBUG const char* insertionPointString; if (insertionPoint == nullptr) { // We can't add resolution to a register at the bottom of a block that has an EHBoundaryOut, // except in the case of the "EH Dummy" resolution from the stack. assert((block->bbNum > bbNumMaxBeforeResolution) || (fromReg == REG_STK) || !blockInfo[block->bbNum].hasEHBoundaryOut); insertionPointString = "bottom"; } else { // We can't add resolution at the top of a block that has an EHBoundaryIn, // except in the case of the "EH Dummy" resolution to the stack. assert((block->bbNum > bbNumMaxBeforeResolution) || (toReg == REG_STK) || !blockInfo[block->bbNum].hasEHBoundaryIn); insertionPointString = "top"; } // We should never add resolution move inside BBCallAlwaysPairTail. 
noway_assert(!block->isBBCallAlwaysPairTail());
#endif // DEBUG

    JITDUMP(" " FMT_BB " %s: move V%02u from ", block->bbNum, insertionPointString, interval->varNum);
    JITDUMP("%s to %s", getRegName(fromReg), getRegName(toReg));

    insertMove(block, insertionPoint, interval->varNum, fromReg, toReg);
    if (fromReg == REG_STK || toReg == REG_STK)
    {
        assert(interval->isSpilled);
    }
    else
    {
        // We should have already marked this as spilled or split.
        assert((interval->isSpilled) || (interval->isSplit));
    }

    INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}

//------------------------------------------------------------------------
// handleOutgoingCriticalEdges: Performs the necessary resolution on all critical edges that feed out of 'block'
//
// Arguments:
//    block - the block with outgoing critical edges.
//
// Return Value:
//    None.
//
// Notes:
//    For all outgoing critical edges (i.e. any successor of this block which is
//    a join edge), if there are any conflicts, split the edge by adding a new block,
//    and generate the resolution code into that block.
//
void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
{
    VARSET_TP outResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveOut, resolutionCandidateVars));
    if (VarSetOps::IsEmpty(compiler, outResolutionSet))
    {
        return;
    }
    VARSET_TP sameResolutionSet(VarSetOps::MakeEmpty(compiler));
    VARSET_TP diffResolutionSet(VarSetOps::MakeEmpty(compiler));

    // Get the outVarToRegMap for this block
    VarToRegMap outVarToRegMap = getOutVarToRegMap(block->bbNum);
    unsigned    succCount      = block->NumSucc(compiler);
    assert(succCount > 1);

    // First, determine the live regs at the end of this block so that we know what regs are
    // available to copy into.
    // Note that for this purpose we use the full live-out set, because we must ensure that
    // even the registers that remain the same across the edge are preserved correctly.
    regMaskTP       liveOutRegs = RBM_NONE;
    VarSetOps::Iter liveOutIter(compiler, block->bbLiveOut);
    unsigned        liveOutVarIndex = 0;
    while (liveOutIter.NextElem(&liveOutVarIndex))
    {
        regNumber fromReg = getVarReg(outVarToRegMap, liveOutVarIndex);
        if (fromReg != REG_STK)
        {
            regMaskTP fromRegMask = genRegMask(fromReg, getIntervalForLocalVar(liveOutVarIndex)->registerType);
            liveOutRegs |= fromRegMask;
        }
    }

    // Next, if this block ends with a switch table, or for Arm64, ends with a JCMP instruction,
    // make sure to not copy into the registers that are consumed at the end of this block.
    //
    // Note: Only switches and JCMP (for Arm64) have input regs (and so can be fed by copies), so those
    // are the only block-ending branches that need special handling.
    regMaskTP consumedRegs = RBM_NONE;
    if (block->bbJumpKind == BBJ_SWITCH)
    {
        // At this point, Lowering has transformed any non-switch-table blocks into
        // cascading ifs.
        GenTree* switchTable = LIR::AsRange(block).LastNode();
        assert(switchTable != nullptr && switchTable->OperGet() == GT_SWITCH_TABLE);

        consumedRegs = switchTable->gtRsvdRegs;
        GenTree* op1 = switchTable->gtGetOp1();
        GenTree* op2 = switchTable->gtGetOp2();
        noway_assert(op1 != nullptr && op2 != nullptr);
        assert(op1->GetRegNum() != REG_NA && op2->GetRegNum() != REG_NA);
        // No floating point values, so no need to worry about the register type
        // (i.e. for ARM32, where we used the genRegMask overload with a type).
        assert(varTypeIsIntegralOrI(op1) && varTypeIsIntegralOrI(op2));
        consumedRegs |= genRegMask(op1->GetRegNum());
        consumedRegs |= genRegMask(op2->GetRegNum());

        // Special handling for GT_COPY to not resolve into the source
        // of switch's operand.
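        // (Illustrative example: if op1 is a GT_COPY whose destination register is rdx and whose
        // source lclVar is still in rcx, rdx was added to consumedRegs above and rcx is added
        // below, so no resolution move can clobber either register before the switch consumes
        // them.)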
        if (op1->OperIs(GT_COPY))
        {
            GenTree* srcOp1 = op1->gtGetOp1();
            consumedRegs |= genRegMask(srcOp1->GetRegNum());
        }
    }

#ifdef TARGET_ARM64
    // Next, if this block ends with a JCMP, we have to make sure:
    // 1. Not to copy into the register that JCMP uses
    //    e.g. JCMP w21, BRANCH
    // 2. Not to copy into the source of JCMP's operand before it is consumed
    //    e.g. Should not use w0 since it will contain wrong value after resolution
    //         call METHOD
    //         ; mov w0, w19  <-- should not resolve in w0 here.
    //         mov w21, w0
    //         JCMP w21, BRANCH
    // 3. Not to modify the local variable it must consume
    // Note: GT_COPY has special handling in codegen and its generation is merged with the
    // node that consumes its result. So both the input and output regs of GT_COPY must be
    // excluded from the set available for resolution.
    LclVarDsc* jcmpLocalVarDsc = nullptr;
    if (block->bbJumpKind == BBJ_COND)
    {
        GenTree* lastNode = LIR::AsRange(block).LastNode();

        if (lastNode->OperIs(GT_JCMP))
        {
            GenTree* op1 = lastNode->gtGetOp1();
            consumedRegs |= genRegMask(op1->GetRegNum());

            if (op1->OperIs(GT_COPY))
            {
                GenTree* srcOp1 = op1->gtGetOp1();
                consumedRegs |= genRegMask(srcOp1->GetRegNum());
            }

            if (op1->IsLocal())
            {
                GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
                jcmpLocalVarDsc          = &compiler->lvaTable[lcl->GetLclNum()];
            }
        }
    }
#endif

    VarToRegMap sameVarToRegMap = sharedCriticalVarToRegMap;
    regMaskTP   sameWriteRegs   = RBM_NONE;
    regMaskTP   diffReadRegs    = RBM_NONE;

    // For each var that may require resolution, classify it as:
    // - in the same register at the end of this block and at each target (no resolution needed)
    // - in different registers at different targets (resolve separately):
    //       diffResolutionSet
    // - in the same register at each target at which it's live, but different from the end of
    //   this block. We may be able to resolve these as if it is "join", but only if they do not
    //   write to any registers that are read by those in the diffResolutionSet:
    //       sameResolutionSet
    VarSetOps::Iter outResolutionSetIter(compiler, outResolutionSet);
    unsigned        outResolutionSetVarIndex = 0;
    while (outResolutionSetIter.NextElem(&outResolutionSetVarIndex))
    {
        regNumber fromReg             = getVarReg(outVarToRegMap, outResolutionSetVarIndex);
        bool      maybeSameLivePaths  = false;
        bool      liveOnlyAtSplitEdge = true;
        regNumber sameToReg           = REG_NA;
        for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
        {
            BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
            if (!VarSetOps::IsMember(compiler, succBlock->bbLiveIn, outResolutionSetVarIndex))
            {
                maybeSameLivePaths = true;
                continue;
            }
            else if (liveOnlyAtSplitEdge)
            {
                // Is the var live only at those target blocks which are connected by a split edge to this block
                liveOnlyAtSplitEdge = ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB));
            }

            regNumber toReg = getVarReg(getInVarToRegMap(succBlock->bbNum), outResolutionSetVarIndex);
            if (sameToReg == REG_NA)
            {
                sameToReg = toReg;
                continue;
            }
            if (toReg == sameToReg)
            {
                continue;
            }
            sameToReg = REG_NA;
            break;
        }

        // Check for the cases where we can't write to a register.
        // We only need to check for these cases if sameToReg is an actual register (not REG_STK).
        if (sameToReg != REG_NA && sameToReg != REG_STK)
        {
            // If there's a path on which this var isn't live, it may use the original value in sameToReg.
            // In this case, sameToReg will be in the liveOutRegs of this block.
            // Similarly, if sameToReg is in sameWriteRegs, it has already been used (i.e.
for a lclVar that's // live only at another target), and we can't copy another lclVar into that reg in this block. regMaskTP sameToRegMask = genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); if (maybeSameLivePaths && (((sameToRegMask & liveOutRegs) != RBM_NONE) || ((sameToRegMask & sameWriteRegs) != RBM_NONE))) { sameToReg = REG_NA; } // If this register is busy because it is used by a switch table at the end of the block // (or for Arm64, it is consumed by JCMP), we can't do the copy in this block since we can't // insert it after the switch (or for Arm64, can't insert and overwrite the operand/source // of operand of JCMP). if ((sameToRegMask & consumedRegs) != RBM_NONE) { sameToReg = REG_NA; } #ifdef TARGET_ARM64 if (jcmpLocalVarDsc && (jcmpLocalVarDsc->lvVarIndex == outResolutionSetVarIndex)) { sameToReg = REG_NA; } #endif // If the var is live only at those blocks connected by a split edge and not live-in at some of the // target blocks, we will resolve it the same way as if it were in diffResolutionSet and resolution // will be deferred to the handling of split edges, which means copy will only be at those target(s). // // Another way to achieve similar resolution for vars live only at split edges is by removing them // from consideration up-front but it requires that we traverse those edges anyway to account for // the registers that must not be overwritten. if (liveOnlyAtSplitEdge && maybeSameLivePaths) { sameToReg = REG_NA; } } if (sameToReg == REG_NA) { VarSetOps::AddElemD(compiler, diffResolutionSet, outResolutionSetVarIndex); if (fromReg != REG_STK) { diffReadRegs |= genRegMask(fromReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); } } else if (sameToReg != fromReg) { VarSetOps::AddElemD(compiler, sameResolutionSet, outResolutionSetVarIndex); setVarReg(sameVarToRegMap, outResolutionSetVarIndex, sameToReg); if (sameToReg != REG_STK) { sameWriteRegs |= genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType); } } } if (!VarSetOps::IsEmpty(compiler, sameResolutionSet)) { if ((sameWriteRegs & diffReadRegs) != RBM_NONE) { // We cannot split the "same" and "diff" regs if the "same" set writes registers // that must be read by the "diff" set. (Note that when these are done as a "batch" // we carefully order them to ensure all the input regs are read before they are // overwritten.) VarSetOps::UnionD(compiler, diffResolutionSet, sameResolutionSet); VarSetOps::ClearD(compiler, sameResolutionSet); } else { // For any vars in the sameResolutionSet, we can simply add the move at the end of "block". resolveEdge(block, nullptr, ResolveSharedCritical, sameResolutionSet); } } if (!VarSetOps::IsEmpty(compiler, diffResolutionSet)) { for (unsigned succIndex = 0; succIndex < succCount; succIndex++) { BasicBlock* succBlock = block->GetSucc(succIndex, compiler); // Any "diffResolutionSet" resolution for a block with no other predecessors will be handled later // as split resolution. if ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB)) { continue; } // Now collect the resolution set for just this edge, if any. // Check only the vars in diffResolutionSet that are live-in to this successor. 
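            // (For instance, if V03 is in rdi at the end of 'block' but expected in rsi at
            // succBlock, it stays in edgeResolutionSet and gets its move on the split edge;
            // a var already in the same register on both sides is removed below, since it
            // needs no move on this edge.)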
            VarToRegMap succInVarToRegMap = getInVarToRegMap(succBlock->bbNum);
            VARSET_TP   edgeResolutionSet(VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
            VarSetOps::Iter iter(compiler, edgeResolutionSet);
            unsigned        varIndex = 0;
            while (iter.NextElem(&varIndex))
            {
                regNumber fromReg = getVarReg(outVarToRegMap, varIndex);
                regNumber toReg   = getVarReg(succInVarToRegMap, varIndex);

                if (fromReg == toReg)
                {
                    VarSetOps::RemoveElemD(compiler, edgeResolutionSet, varIndex);
                }
            }
            if (!VarSetOps::IsEmpty(compiler, edgeResolutionSet))
            {
                // For EH vars, we can always safely load them from the stack into the target for this block,
                // so if we have only EH vars, we'll do that instead of splitting the edge.
                if ((compiler->compHndBBtabCount > 0) && VarSetOps::IsSubset(compiler, edgeResolutionSet, exceptVars))
                {
                    GenTree*        insertionPoint = LIR::AsRange(succBlock).FirstNode();
                    VarSetOps::Iter edgeSetIter(compiler, edgeResolutionSet);
                    unsigned        edgeVarIndex = 0;
                    while (edgeSetIter.NextElem(&edgeVarIndex))
                    {
                        regNumber toReg = getVarReg(succInVarToRegMap, edgeVarIndex);
                        setVarReg(succInVarToRegMap, edgeVarIndex, REG_STK);
                        if (toReg != REG_STK)
                        {
                            Interval* interval = getIntervalForLocalVar(edgeVarIndex);
                            assert(interval->isWriteThru);
                            addResolution(succBlock, insertionPoint, interval, toReg, REG_STK);
                            JITDUMP(" (EHvar)\n");
                        }
                    }
                }
                else
                {
                    resolveEdge(block, succBlock, ResolveCritical, edgeResolutionSet);
                }
            }
        }
    }
}

//------------------------------------------------------------------------
// resolveEdges: Perform resolution across basic block edges
//
// Arguments:
//    None.
//
// Return Value:
//    None.
//
// Notes:
//    Traverse the basic blocks.
//    - If this block has a single predecessor that is not the immediately
//      preceding block, perform any needed 'split' resolution at the beginning of this block
//    - Otherwise if this block has critical incoming edges, handle them.
//    - If this block has a single successor that has multiple predecessors, perform any needed
//      'join' resolution at the end of this block.
//    Note that a block may have both 'split' or 'critical' incoming edge(s) and 'join' outgoing
//    edges.
//
void LinearScan::resolveEdges()
{
    JITDUMP("RESOLVING EDGES\n");

    // The resolutionCandidateVars set was initialized with all the lclVars that are live-in to
    // any block. We now intersect that set with any lclVars that ever spilled or split.
    // If there are no candidates for resolution, simply return.
    VarSetOps::IntersectionD(compiler, resolutionCandidateVars, splitOrSpilledVars);
    if (VarSetOps::IsEmpty(compiler, resolutionCandidateVars))
    {
        return;
    }

    // Handle all the critical edges first.
    // We will try to avoid resolution across critical edges in cases where all the critical-edge
    // targets of a block have the same home. We will then split the edges only for the
    // remaining mismatches. We visit the out-edges, as that allows us to share the moves that are
    // common among all the targets.
    if (hasCriticalEdges)
    {
        for (BasicBlock* const block : compiler->Blocks())
        {
            if (block->bbNum > bbNumMaxBeforeResolution)
            {
                // This is a new block added during resolution - we don't need to visit these now.
                continue;
            }
            if (blockInfo[block->bbNum].hasCriticalOutEdge)
            {
                handleOutgoingCriticalEdges(block);
            }
        }
    }

    for (BasicBlock* const block : compiler->Blocks())
    {
        if (block->bbNum > bbNumMaxBeforeResolution)
        {
            // This is a new block added during resolution - we don't need to visit these now.
            continue;
        }

        unsigned    succCount       = block->NumSucc(compiler);
        BasicBlock* uniquePredBlock = block->GetUniquePred(compiler);

        // First, if this block has a single predecessor,
        // we may need resolution at the beginning of this block.
        // This may be true even if it's the block we used for starting locations,
        // if a variable was spilled.
        VARSET_TP inResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveIn, resolutionCandidateVars));
        if (!VarSetOps::IsEmpty(compiler, inResolutionSet))
        {
            if (uniquePredBlock != nullptr)
            {
                // We may have split edges during critical edge resolution, and in the process split
                // a non-critical edge as well.
                // It is unlikely that we would ever have more than one of these in sequence (indeed,
                // I don't think it's possible), but there's no need to assume that it can't.
                while (uniquePredBlock->bbNum > bbNumMaxBeforeResolution)
                {
                    uniquePredBlock = uniquePredBlock->GetUniquePred(compiler);
                    noway_assert(uniquePredBlock != nullptr);
                }
                resolveEdge(uniquePredBlock, block, ResolveSplit, inResolutionSet);
            }
        }

        // Finally, if this block has a single successor:
        //  - and that has at least one other predecessor (otherwise we will do the resolution at the
        //    top of the successor),
        //  - and that is not the target of a critical edge (otherwise we've already handled it)
        // we may need resolution at the end of this block.

        if (succCount == 1)
        {
            BasicBlock* succBlock = block->GetSucc(0, compiler);
            if (succBlock->GetUniquePred(compiler) == nullptr)
            {
                VARSET_TP outResolutionSet(
                    VarSetOps::Intersection(compiler, succBlock->bbLiveIn, resolutionCandidateVars));
                if (!VarSetOps::IsEmpty(compiler, outResolutionSet))
                {
                    resolveEdge(block, succBlock, ResolveJoin, outResolutionSet);
                }
            }
        }
    }

    // Now, fixup the mapping for any blocks that were added for edge splitting.
    // See the comment prior to the call to fgSplitEdge() in resolveEdge().
    // Note that we could fold this loop in with the checking code below, but that
    // would only improve the debug case, and would clutter up the code somewhat.
    if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
    {
        for (BasicBlock* const block : compiler->Blocks())
        {
            if (block->bbNum > bbNumMaxBeforeResolution)
            {
                // There may be multiple blocks inserted when we split. But we must always have exactly
                // one path (i.e. all blocks must be single-successor and single-predecessor),
                // and only one block along the path may be non-empty.
                // Note that we may have a newly-inserted block that is empty, but which connects
                // two non-resolution blocks. This happens when an edge is split that requires it.
                BasicBlock* succBlock = block;
                do
                {
                    succBlock = succBlock->GetUniqueSucc();
                    noway_assert(succBlock != nullptr);
                } while ((succBlock->bbNum > bbNumMaxBeforeResolution) && succBlock->isEmpty());
                BasicBlock* predBlock = block;
                do
                {
                    predBlock = predBlock->GetUniquePred(compiler);
                    noway_assert(predBlock != nullptr);
                } while ((predBlock->bbNum > bbNumMaxBeforeResolution) && predBlock->isEmpty());
                unsigned succBBNum = succBlock->bbNum;
                unsigned predBBNum = predBlock->bbNum;
                if (block->isEmpty())
                {
                    // For the case of the empty block, find the non-resolution block (succ or pred).
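                    // (Whichever side is itself a new resolution block is recorded as 0 in the
                    // SplitEdgeInfo set below, so only the original block's number is kept.)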
if (predBBNum > bbNumMaxBeforeResolution) { assert(succBBNum <= bbNumMaxBeforeResolution); predBBNum = 0; } else { succBBNum = 0; } } else { assert((succBBNum <= bbNumMaxBeforeResolution) && (predBBNum <= bbNumMaxBeforeResolution)); } SplitEdgeInfo info = {predBBNum, succBBNum}; getSplitBBNumToTargetBBNumMap()->Set(block->bbNum, info); // Set both the live-in and live-out to the live-in of the successor (by construction liveness // doesn't change in a split block). VarSetOps::Assign(compiler, block->bbLiveIn, succBlock->bbLiveIn); VarSetOps::Assign(compiler, block->bbLiveOut, succBlock->bbLiveIn); } } } #ifdef DEBUG // Make sure the varToRegMaps match up on all edges. bool foundMismatch = false; for (BasicBlock* const block : compiler->Blocks()) { if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution) { continue; } VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum); for (BasicBlock* const predBlock : block->PredBlocks()) { VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum); VarSetOps::Iter iter(compiler, block->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { regNumber fromReg = getVarReg(fromVarToRegMap, varIndex); regNumber toReg = getVarReg(toVarToRegMap, varIndex); if (fromReg != toReg) { Interval* interval = getIntervalForLocalVar(varIndex); // The fromReg and toReg may not match for a write-thru interval where the toReg is // REG_STK, since the stack value is always valid for that case (so no move is needed). if (!interval->isWriteThru || (toReg != REG_STK)) { if (!foundMismatch) { foundMismatch = true; printf("Found mismatched var locations after resolution!\n"); } printf(" V%02u: " FMT_BB " to " FMT_BB ": %s to %s\n", interval->varNum, predBlock->bbNum, block->bbNum, getRegName(fromReg), getRegName(toReg)); } } } } } assert(!foundMismatch); #endif JITDUMP("\n"); } //------------------------------------------------------------------------ // resolveEdge: Perform the specified type of resolution between two blocks. // // Arguments: // fromBlock - the block from which the edge originates // toBlock - the block at which the edge terminates // resolveType - the type of resolution to be performed // liveSet - the set of tracked lclVar indices which may require resolution // // Return Value: // None. // // Assumptions: // The caller must have performed the analysis to determine the type of the edge. // // Notes: // This method emits the correctly ordered moves necessary to place variables in the // correct registers across a Split, Join or Critical edge. // In order to avoid overwriting register values before they have been moved to their // new home (register/stack), it first does the register-to-stack moves (to free those // registers), then the register to register moves, ensuring that the target register // is free before the move, and then finally the stack to register moves. 
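//    For example (illustrative only): if V01 must move rax -> stack, V02 rbx -> rax, and
//    V03 stack -> rbx, the V01 store is emitted first so that rax is free before V02's move,
//    and the V03 load comes last, once rbx has been vacated.
//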
void LinearScan::resolveEdge(BasicBlock*      fromBlock,
                             BasicBlock*      toBlock,
                             ResolveType      resolveType,
                             VARSET_VALARG_TP liveSet)
{
    VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
    VarToRegMap toVarToRegMap;
    if (resolveType == ResolveSharedCritical)
    {
        toVarToRegMap = sharedCriticalVarToRegMap;
    }
    else
    {
        toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
    }

    // The block to which we add the resolution moves depends on the resolveType
    BasicBlock* block;
    switch (resolveType)
    {
        case ResolveJoin:
        case ResolveSharedCritical:
            block = fromBlock;
            break;
        case ResolveSplit:
            block = toBlock;
            break;
        case ResolveCritical:
            // fgSplitEdge may add one or two BasicBlocks. It returns the block that splits
            // the edge from 'fromBlock' and 'toBlock', but if it inserts that block right after
            // a block with a fall-through it will have to create another block to handle that edge.
            // These new blocks can be mapped to existing blocks in order to correctly handle
            // the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled
            // in resolveEdges(), after all the edge resolution has been done (by calling this
            // method for each edge).
            block = compiler->fgSplitEdge(fromBlock, toBlock);

            // Split edges are counted against fromBlock.
            INTRACK_STATS(updateLsraStat(STAT_SPLIT_EDGE, fromBlock->bbNum));
            break;
        default:
            unreached();
            break;
    }

#ifndef TARGET_XARCH
    // We record tempregs for beginning and end of each block.
    // For amd64/x86 we only need a tempReg for float - we'll use xchg for int.
    // TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below
    // modifies the varToRegMaps so we don't have all the correct registers at the time
    // we need to get the tempReg.
    regNumber tempRegInt =
        (resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT);
#endif // !TARGET_XARCH
    regNumber tempRegFlt = REG_NA;
#ifdef TARGET_ARM
    regNumber tempRegDbl = REG_NA;
#endif
    if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical))
    {
#ifdef TARGET_ARM
        // Try to reserve a double register for TYP_DOUBLE and use it for TYP_FLOAT too if available.
        tempRegDbl = getTempRegForResolution(fromBlock, toBlock, TYP_DOUBLE);
        if (tempRegDbl != REG_NA)
        {
            tempRegFlt = tempRegDbl;
        }
        else
#endif // TARGET_ARM
        {
            tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT);
        }
    }

    regMaskTP targetRegsToDo      = RBM_NONE;
    regMaskTP targetRegsReady     = RBM_NONE;
    regMaskTP targetRegsFromStack = RBM_NONE;

    // The following arrays capture the location of the registers as they are moved:
    // - location[reg] gives the current location of the var that was originally in 'reg'.
    //   (Note that a var may be moved more than once.)
    // - source[reg] gives the original location of the var that needs to be moved to 'reg'.
    // For example, if a var is in rax and needs to be moved to rsi, then we would start with:
    //   location[rax] == rax
    //   source[rsi] == rax     -- this doesn't change
    // Then, if for some reason we need to move it temporarily to rbx, we would have:
    //   location[rax] == rbx
    // Once we have completed the move, we will have:
    //   location[rax] == REG_NA
    // This indicates that the var originally in rax is now in its target register.

    regNumberSmall location[REG_COUNT];
    C_ASSERT(sizeof(char) == sizeof(regNumberSmall)); // for memset to work
    memset(location, REG_NA, REG_COUNT);
    regNumberSmall source[REG_COUNT];
    memset(source, REG_NA, REG_COUNT);

    // What interval is this register associated with?
// (associated with incoming reg) Interval* sourceIntervals[REG_COUNT]; memset(&sourceIntervals, 0, sizeof(sourceIntervals)); // Intervals for vars that need to be loaded from the stack Interval* stackToRegIntervals[REG_COUNT]; memset(&stackToRegIntervals, 0, sizeof(stackToRegIntervals)); // Get the starting insertion point for the "to" resolution GenTree* insertionPoint = nullptr; if (resolveType == ResolveSplit || resolveType == ResolveCritical) { insertionPoint = LIR::AsRange(block).FirstNode(); } // If this is an edge between EH regions, we may have "extra" live-out EH vars. // If we are adding resolution at the end of the block, we need to create "virtual" moves // for these so that their registers are freed and can be reused. if ((resolveType == ResolveJoin) && (compiler->compHndBBtabCount > 0)) { VARSET_TP extraLiveSet(VarSetOps::Diff(compiler, block->bbLiveOut, toBlock->bbLiveIn)); VarSetOps::IntersectionD(compiler, extraLiveSet, exceptVars); VarSetOps::Iter iter(compiler, extraLiveSet); unsigned extraVarIndex = 0; while (iter.NextElem(&extraVarIndex)) { Interval* interval = getIntervalForLocalVar(extraVarIndex); assert(interval->isWriteThru); regNumber fromReg = getVarReg(fromVarToRegMap, extraVarIndex); if (fromReg != REG_STK) { addResolution(block, insertionPoint, interval, REG_STK, fromReg); JITDUMP(" (EH DUMMY)\n"); setVarReg(fromVarToRegMap, extraVarIndex, REG_STK); } } } // First: // - Perform all moves from reg to stack (no ordering needed on these) // - For reg to reg moves, record the current location, associating their // source location with the target register they need to go into // - For stack to reg moves (done last, no ordering needed between them) // record the interval associated with the target reg // TODO-Throughput: We should be looping over the liveIn and liveOut registers, since // that will scale better than the live variables VarSetOps::Iter iter(compiler, liveSet); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { Interval* interval = getIntervalForLocalVar(varIndex); regNumber fromReg = getVarReg(fromVarToRegMap, varIndex); regNumber toReg = getVarReg(toVarToRegMap, varIndex); if (fromReg == toReg) { continue; } if (interval->isWriteThru && (toReg == REG_STK)) { // We don't actually move a writeThru var back to the stack, as its stack value is always valid. // However, if this is a Join edge (i.e. the move is happening at the bottom of the block), // and it is a "normal" flow edge, we will go ahead and generate a mov instruction, which will be // a NOP but will cause the variable to be removed from being live in the register. if ((resolveType == ResolveSplit) || block->hasEHBoundaryOut()) { continue; } } // For Critical edges, the location will not change on either side of the edge, // since we'll add a new block to do the move. if (resolveType == ResolveSplit) { setVarReg(toVarToRegMap, varIndex, fromReg); } else if (resolveType == ResolveJoin || resolveType == ResolveSharedCritical) { setVarReg(fromVarToRegMap, varIndex, toReg); } assert(fromReg < UCHAR_MAX && toReg < UCHAR_MAX); if (fromReg == REG_STK) { stackToRegIntervals[toReg] = interval; targetRegsFromStack |= genRegMask(toReg); } else if (toReg == REG_STK) { // Do the reg to stack moves now addResolution(block, insertionPoint, interval, REG_STK, fromReg); JITDUMP(" (%s)\n", (interval->isWriteThru && (toReg == REG_STK)) ? 
"EH DUMMY" : resolveTypeName[resolveType]); } else { location[fromReg] = (regNumberSmall)fromReg; source[toReg] = (regNumberSmall)fromReg; sourceIntervals[fromReg] = interval; targetRegsToDo |= genRegMask(toReg); } } // REGISTER to REGISTER MOVES // First, find all the ones that are ready to move now regMaskTP targetCandidates = targetRegsToDo; while (targetCandidates != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetCandidates); targetCandidates &= ~targetRegMask; regNumber targetReg = genRegNumFromMask(targetRegMask); if (location[targetReg] == REG_NA) { #ifdef TARGET_ARM regNumber sourceReg = (regNumber)source[targetReg]; Interval* interval = sourceIntervals[sourceReg]; if (interval->registerType == TYP_DOUBLE) { // For ARM32, make sure that both of the float halves of the double register are available. assert(genIsValidDoubleReg(targetReg)); regNumber anotherHalfRegNum = REG_NEXT(targetReg); if (location[anotherHalfRegNum] == REG_NA) { targetRegsReady |= targetRegMask; } } else #endif // TARGET_ARM { targetRegsReady |= targetRegMask; } } } // Perform reg to reg moves while (targetRegsToDo != RBM_NONE) { while (targetRegsReady != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetRegsReady); targetRegsToDo &= ~targetRegMask; targetRegsReady &= ~targetRegMask; regNumber targetReg = genRegNumFromMask(targetRegMask); assert(location[targetReg] != targetReg); assert(targetReg < REG_COUNT); regNumber sourceReg = (regNumber)source[targetReg]; assert(sourceReg < REG_COUNT); regNumber fromReg = (regNumber)location[sourceReg]; // stack to reg movs should be done last as part of "targetRegsFromStack" assert(fromReg < REG_STK); Interval* interval = sourceIntervals[sourceReg]; assert(interval != nullptr); addResolution(block, insertionPoint, interval, targetReg, fromReg); JITDUMP(" (%s)\n", resolveTypeName[resolveType]); sourceIntervals[sourceReg] = nullptr; location[sourceReg] = REG_NA; regMaskTP fromRegMask = genRegMask(fromReg); // Do we have a free targetReg? if (fromReg == sourceReg) { if (source[fromReg] != REG_NA && ((targetRegsFromStack & fromRegMask) != fromRegMask)) { targetRegsReady |= fromRegMask; #ifdef TARGET_ARM if (genIsValidDoubleReg(fromReg)) { // Ensure that either: // - the Interval targeting fromReg is not double, or // - the other half of the double is free. Interval* otherInterval = sourceIntervals[source[fromReg]]; regNumber upperHalfReg = REG_NEXT(fromReg); if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA)) { targetRegsReady &= ~fromRegMask; } } } else if (genIsValidFloatReg(fromReg) && !genIsValidDoubleReg(fromReg)) { // We may have freed up the other half of a double where the lower half // was already free. 
regNumber lowerHalfReg = REG_PREV(fromReg); regNumber lowerHalfSrcReg = (regNumber)source[lowerHalfReg]; regNumber lowerHalfSrcLoc = (regNumber)location[lowerHalfReg]; regMaskTP lowerHalfRegMask = genRegMask(lowerHalfReg); // Necessary conditions: // - There is a source register for this reg (lowerHalfSrcReg != REG_NA) // - It is currently free (lowerHalfSrcLoc == REG_NA) // - The source interval isn't yet completed (sourceIntervals[lowerHalfSrcReg] != nullptr) // - It's not in the ready set ((targetRegsReady & lowerHalfRegMask) == // RBM_NONE) // - It's not resolved from stack ((targetRegsFromStack & lowerHalfRegMask) != // lowerHalfRegMask) if ((lowerHalfSrcReg != REG_NA) && (lowerHalfSrcLoc == REG_NA) && (sourceIntervals[lowerHalfSrcReg] != nullptr) && ((targetRegsReady & lowerHalfRegMask) == RBM_NONE) && ((targetRegsFromStack & lowerHalfRegMask) != lowerHalfRegMask)) { // This must be a double interval, otherwise it would be in targetRegsReady, or already // completed. assert(sourceIntervals[lowerHalfSrcReg]->registerType == TYP_DOUBLE); targetRegsReady |= lowerHalfRegMask; } #endif // TARGET_ARM } } } if (targetRegsToDo != RBM_NONE) { regMaskTP targetRegMask = genFindLowestBit(targetRegsToDo); regNumber targetReg = genRegNumFromMask(targetRegMask); // Is it already there due to other moves? // If not, move it to the temp reg, OR swap it with another register regNumber sourceReg = (regNumber)source[targetReg]; regNumber fromReg = (regNumber)location[sourceReg]; if (targetReg == fromReg) { targetRegsToDo &= ~targetRegMask; } else { regNumber tempReg = REG_NA; bool useSwap = false; if (emitter::isFloatReg(targetReg)) { #ifdef TARGET_ARM if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE) { // ARM32 requires a double temp register for TYP_DOUBLE. tempReg = tempRegDbl; } else #endif // TARGET_ARM tempReg = tempRegFlt; } #ifdef TARGET_XARCH else { useSwap = true; } #else // !TARGET_XARCH else { tempReg = tempRegInt; } #endif // !TARGET_XARCH if (useSwap || tempReg == REG_NA) { // First, we have to figure out the destination register for what's currently in fromReg, // so that we can find its sourceInterval. regNumber otherTargetReg = REG_NA; // By chance, is fromReg going where it belongs? if (location[source[fromReg]] == targetReg) { otherTargetReg = fromReg; // If we can swap, we will be done with otherTargetReg as well. // Otherwise, we'll spill it to the stack and reload it later. if (useSwap) { regMaskTP fromRegMask = genRegMask(fromReg); targetRegsToDo &= ~fromRegMask; } } else { // Look at the remaining registers from targetRegsToDo (which we expect to be relatively // small at this point) to find out what's currently in targetReg. regMaskTP mask = targetRegsToDo; while (mask != RBM_NONE && otherTargetReg == REG_NA) { regMaskTP nextRegMask = genFindLowestBit(mask); regNumber nextReg = genRegNumFromMask(nextRegMask); mask &= ~nextRegMask; if (location[source[nextReg]] == targetReg) { otherTargetReg = nextReg; } } } assert(otherTargetReg != REG_NA); if (useSwap) { // Generate a "swap" of fromReg and targetReg insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg, sourceIntervals[sourceReg]->varNum, fromReg); location[sourceReg] = REG_NA; location[source[otherTargetReg]] = (regNumberSmall)fromReg; INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum)); } else { // Spill "targetReg" to the stack and add its eventual target (otherTargetReg) // to "targetRegsFromStack", which will be handled below. // NOTE: This condition is very rare. 
                        // Setting COMPlus_JitStressRegs=0x203 has been known to trigger it in JIT SH.

                        // First, spill "otherInterval" from targetReg to the stack.
                        Interval* otherInterval = sourceIntervals[source[otherTargetReg]];
                        setIntervalAsSpilled(otherInterval);
                        addResolution(block, insertionPoint, otherInterval, REG_STK, targetReg);
                        JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
                        location[source[otherTargetReg]] = REG_STK;

                        regMaskTP otherTargetRegMask = genRegMask(otherTargetReg);
                        targetRegsFromStack |= otherTargetRegMask;
                        stackToRegIntervals[otherTargetReg] = otherInterval;
                        targetRegsToDo &= ~otherTargetRegMask;

                        // Now, move the interval that is going to targetReg.
                        addResolution(block, insertionPoint, sourceIntervals[sourceReg], targetReg, fromReg);
                        JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
                        location[sourceReg] = REG_NA;

                        // Add its "fromReg" to "targetRegsReady", only if:
                        // - It was one of the target registers we originally determined.
                        // - It is not the eventual target (otherTargetReg) because its
                        //   value will be retrieved from STK.
                        if (source[fromReg] != REG_NA && fromReg != otherTargetReg)
                        {
                            regMaskTP fromRegMask = genRegMask(fromReg);
                            targetRegsReady |= fromRegMask;
#ifdef TARGET_ARM
                            if (genIsValidDoubleReg(fromReg))
                            {
                                // Ensure that either:
                                // - the Interval targeting fromReg is not double, or
                                // - the other half of the double is free.
                                Interval* otherInterval = sourceIntervals[source[fromReg]];
                                regNumber upperHalfReg  = REG_NEXT(fromReg);
                                if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA))
                                {
                                    targetRegsReady &= ~fromRegMask;
                                }
                            }
#endif // TARGET_ARM
                        }
                    }
                    targetRegsToDo &= ~targetRegMask;
                }
                else
                {
                    compiler->codeGen->regSet.rsSetRegsModified(genRegMask(tempReg) DEBUGARG(true));
#ifdef TARGET_ARM
                    if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE)
                    {
                        assert(genIsValidDoubleReg(targetReg));
                        assert(genIsValidDoubleReg(tempReg));

                        addResolutionForDouble(block, insertionPoint, sourceIntervals, location, tempReg, targetReg,
                                               resolveType);
                    }
                    else
#endif // TARGET_ARM
                    {
                        assert(sourceIntervals[targetReg] != nullptr);

                        addResolution(block, insertionPoint, sourceIntervals[targetReg], tempReg, targetReg);
                        JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
                        location[targetReg] = (regNumberSmall)tempReg;
                    }
                    targetRegsReady |= targetRegMask;
                }
            }
        }
    }

    // Finally, perform stack to reg moves
    // All the target regs will be empty at this point
    while (targetRegsFromStack != RBM_NONE)
    {
        regMaskTP targetRegMask = genFindLowestBit(targetRegsFromStack);
        targetRegsFromStack &= ~targetRegMask;
        regNumber targetReg = genRegNumFromMask(targetRegMask);

        Interval* interval = stackToRegIntervals[targetReg];
        assert(interval != nullptr);

        addResolution(block, insertionPoint, interval, targetReg, REG_STK);
        JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
    }
}

#if TRACK_LSRA_STATS

const char* LinearScan::getStatName(unsigned stat)
{
    LsraStat lsraStat = (LsraStat)stat;
    assert(lsraStat != LsraStat::COUNT);

    static const char* const lsraStatNames[] = {
#define LSRA_STAT_DEF(stat, name) name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat,
#include "lsra_score.h"
#undef REG_SEL_DEF
    };
    assert(stat < ArrLen(lsraStatNames));
    return lsraStatNames[lsraStat];
}

LsraStat LinearScan::getLsraStatFromScore(RegisterScore registerScore)
{
    switch (registerScore)
    {
#define REG_SEL_DEF(stat, value, shortname, orderSeqId)                                                                \
    case RegisterScore::stat:                                                                                          \
        return LsraStat::STAT_##stat;
#include "lsra_score.h"
#undef REG_SEL_DEF
        default:
            return LsraStat::STAT_FREE;
    }
}
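// For illustration (hypothetical lsra_score.h entry, not necessarily a real one): a line such as
//     REG_SEL_DEF(CONST_AVAILABLE, 0x0004, "CA", 3)
// would expand in the switch above to
//     case RegisterScore::CONST_AVAILABLE: return LsraStat::STAT_CONST_AVAILABLE;
// and would contribute "CONST_AVAILABLE" to the lsraStatNames table in getStatName.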
// ---------------------------------------------------------- // updateLsraStat: Increment LSRA stat counter. // // Arguments: // stat - LSRA stat enum // bbNum - Basic block to which LSRA stat needs to be // associated with. // void LinearScan::updateLsraStat(LsraStat stat, unsigned bbNum) { if (bbNum > bbNumMaxBeforeResolution) { // This is a newly created basic block as part of resolution. // These blocks contain resolution moves that are already accounted. return; } ++(blockInfo[bbNum].stats[(unsigned)stat]); } // ----------------------------------------------------------- // dumpLsraStats - dumps Lsra stats to given file. // // Arguments: // file - file to which stats are to be written. // void LinearScan::dumpLsraStats(FILE* file) { unsigned sumStats[LsraStat::COUNT] = {0}; weight_t wtdStats[LsraStat::COUNT] = {0}; fprintf(file, "----------\n"); fprintf(file, "LSRA Stats"); #ifdef DEBUG if (!VERBOSE) { fprintf(file, " : %s\n", compiler->info.compFullName); } else { // In verbose mode no need to print full name // while printing lsra stats. fprintf(file, "\n"); } #else fprintf(file, " : %s\n", compiler->eeGetMethodFullName(compiler->info.compCompHnd)); #endif fprintf(file, "----------\n"); #ifdef DEBUG fprintf(file, "Register selection order: %S\n", JitConfig.JitLsraOrdering() == nullptr ? W("ABCDEFGHIJKLMNOPQ") : JitConfig.JitLsraOrdering()); #endif fprintf(file, "Total Tracked Vars: %d\n", compiler->lvaTrackedCount); fprintf(file, "Total Reg Cand Vars: %d\n", regCandidateVarCount); fprintf(file, "Total number of Intervals: %d\n", static_cast<unsigned>((intervals.size() == 0 ? 0 : (intervals.size() - 1)))); fprintf(file, "Total number of RefPositions: %d\n", static_cast<unsigned>(refPositions.size() - 1)); // compute total number of spill temps created unsigned numSpillTemps = 0; for (int i = 0; i < TYP_COUNT; i++) { numSpillTemps += maxSpill[i]; } fprintf(file, "Total Number of spill temps created: %d\n", numSpillTemps); fprintf(file, "..........\n"); bool addedBlockHeader = false; bool anyNonZeroStat = false; // Iterate for block 0 for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { unsigned lsraStat = blockInfo[0].stats[statIndex]; if (lsraStat != 0) { if (!addedBlockHeader) { addedBlockHeader = true; fprintf(file, FMT_BB " [%8.2f]: ", 0, blockInfo[0].weight); fprintf(file, "%s = %d", getStatName(statIndex), lsraStat); } else { fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat); } sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * blockInfo[0].weight); anyNonZeroStat = true; } } if (anyNonZeroStat) { fprintf(file, "\n"); } // Iterate for remaining blocks for (BasicBlock* const block : compiler->Blocks()) { if (block->bbNum > bbNumMaxBeforeResolution) { continue; } addedBlockHeader = false; anyNonZeroStat = false; for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++) { unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex]; if (lsraStat != 0) { if (!addedBlockHeader) { addedBlockHeader = true; fprintf(file, FMT_BB " [%8.2f]: ", block->bbNum, block->bbWeight); fprintf(file, "%s = %d", getStatName(statIndex), lsraStat); } else { fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat); } sumStats[statIndex] += lsraStat; wtdStats[statIndex] += (lsraStat * block->bbWeight); anyNonZeroStat = true; } } if (anyNonZeroStat) { fprintf(file, "\n"); } } fprintf(file, "..........\n"); for (int regSelectI = 0; regSelectI < LsraStat::COUNT; regSelectI++) { if (regSelectI == firstRegSelStat) { fprintf(file, "..........\n"); } if 
((regSelectI < firstRegSelStat) || (sumStats[regSelectI] != 0))
        {
            // Print register selection stats
            if (regSelectI >= firstRegSelStat)
            {
                fprintf(file, "Total %s [#%2d] : %d Weighted: %f\n", getStatName(regSelectI),
                        (regSelectI - firstRegSelStat + 1), sumStats[regSelectI], wtdStats[regSelectI]);
            }
            else
            {
                fprintf(file, "Total %s : %d Weighted: %f\n", getStatName(regSelectI), sumStats[regSelectI],
                        wtdStats[regSelectI]);
            }
        }
    }
    printf("\n");
}

// -----------------------------------------------------------
// dumpLsraStatsCsv - dumps Lsra stats to given file in csv format.
//
// Arguments:
//    file - file to which stats are to be written.
//
void LinearScan::dumpLsraStatsCsv(FILE* file)
{
    unsigned sumStats[LsraStat::COUNT] = {0};

    // Write the header if the file is empty
    if (ftell(file) == 0)
    {
        // header
        fprintf(file, "\"Method Name\"");
        for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
        {
            fprintf(file, ",\"%s\"", LinearScan::getStatName(statIndex));
        }
        fprintf(file, ",\"PerfScore\"\n");
    }

    // bbNum == 0
    for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
    {
        sumStats[statIndex] += blockInfo[0].stats[statIndex];
    }

    // blocks
    for (BasicBlock* const block : compiler->Blocks())
    {
        if (block->bbNum > bbNumMaxBeforeResolution)
        {
            continue;
        }

        for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
        {
            sumStats[statIndex] += blockInfo[block->bbNum].stats[statIndex];
        }
    }

    fprintf(file, "\"%s\"", compiler->info.compFullName);
    for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
    {
        fprintf(file, ",%u", sumStats[statIndex]);
    }
    fprintf(file, ",%.2f\n", compiler->info.compPerfScore);
}

// -----------------------------------------------------------
// dumpLsraStatsSummary - dumps Lsra stats summary to given file
//
// Arguments:
//    file - file to which stats are to be written.
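//
// Notes:
//    (A sketch of the output, inferred from the fprintf in the body below: for each stat a
//    ", <statName> <count> <statName>Wt <weightedCount>" field is appended, forming a single
//    summary line.)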
//
void LinearScan::dumpLsraStatsSummary(FILE* file)
{
    unsigned sumStats[LsraStat::STAT_FREE] = {0};
    weight_t wtdStats[LsraStat::STAT_FREE] = {0.0};

    // Iterate for block 0
    for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
    {
        unsigned lsraStat = blockInfo[0].stats[statIndex];
        sumStats[statIndex] += lsraStat;
        wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
    }

    // Iterate for remaining blocks
    for (BasicBlock* const block : compiler->Blocks())
    {
        if (block->bbNum > bbNumMaxBeforeResolution)
        {
            continue;
        }

        for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
        {
            unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
            sumStats[statIndex] += lsraStat;
            wtdStats[statIndex] += (lsraStat * block->bbWeight);
        }
    }

    for (int regSelectI = 0; regSelectI < LsraStat::STAT_FREE; regSelectI++)
    {
        fprintf(file, ", %s %u %sWt %f", getStatName(regSelectI), sumStats[regSelectI], getStatName(regSelectI),
                wtdStats[regSelectI]);
    }
}
#endif // TRACK_LSRA_STATS

#ifdef DEBUG
void dumpRegMask(regMaskTP regs)
{
    if (regs == RBM_ALLINT)
    {
        printf("[allInt]");
    }
    else if (regs == (RBM_ALLINT & ~RBM_FPBASE))
    {
        printf("[allIntButFP]");
    }
    else if (regs == RBM_ALLFLOAT)
    {
        printf("[allFloat]");
    }
    else if (regs == RBM_ALLDOUBLE)
    {
        printf("[allDouble]");
    }
    else
    {
        dspRegMask(regs);
    }
}

static const char* getRefTypeName(RefType refType)
{
    switch (refType)
    {
#define DEF_REFTYPE(memberName, memberValue, shortName)                                                                \
    case memberName:                                                                                                   \
        return #memberName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
        default:
            return nullptr;
    }
}

static const char* getRefTypeShortName(RefType refType)
{
    switch (refType)
    {
#define DEF_REFTYPE(memberName, memberValue, shortName)                                                                \
    case memberName:                                                                                                   \
        return shortName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
        default:
            return nullptr;
    }
}

//------------------------------------------------------------------------
// getScoreName: Returns the textual name of register score
const char* LinearScan::getScoreName(RegisterScore score)
{
    switch (score)
    {
#define REG_SEL_DEF(stat, value, shortname, orderSeqId)                                                                \
    case stat:                                                                                                         \
        return shortname;
#include "lsra_score.h"
#undef REG_SEL_DEF
        default:
            return " - ";
    }
}

void RefPosition::dump(LinearScan* linearScan)
{
    printf("<RefPosition #%-3u @%-3u", rpNum, nodeLocation);
    printf(" %s ", getRefTypeName(refType));

    if (this->IsPhysRegRef())
    {
        this->getReg()->tinyDump();
    }
    else if (getInterval())
    {
        this->getInterval()->tinyDump();
    }
    if (this->treeNode)
    {
        printf("%s", treeNode->OpName(treeNode->OperGet()));
        if (this->treeNode->IsMultiRegNode())
        {
            printf("[%d]", this->multiRegIdx);
        }
    }
    printf(" " FMT_BB " ", this->bbNum);

    printf("regmask=");
    dumpRegMask(registerAssignment);

    printf(" minReg=%d", minRegCandidateCount);

    if (this->lastUse)
    {
        printf(" last");
    }
    if (this->reload)
    {
        printf(" reload");
    }
    if (this->spillAfter)
    {
        printf(" spillAfter");
    }
    if (this->singleDefSpill)
    {
        printf(" singleDefSpill");
    }
    if (this->writeThru)
    {
        printf(" writeThru");
    }
    if (this->moveReg)
    {
        printf(" move");
    }
    if (this->copyReg)
    {
        printf(" copy");
    }
    if (this->isFixedRegRef)
    {
        printf(" fixed");
    }
    if (this->isLocalDefUse)
    {
        printf(" local");
    }
    if (this->delayRegFree)
    {
        printf(" delay");
    }
    if (this->outOfOrder)
    {
        printf(" outOfOrder");
    }
    if (this->RegOptional())
    {
        printf(" regOptional");
    }

    printf(" wt=%.2f", linearScan->getWeight(this));
    printf(">\n");
}

void RegRecord::dump()
{
    tinyDump();
}

void Interval::dump()
{
    printf("Interval %2u:", intervalIndex);

    if (isLocalVar)
    {
        printf(" (V%02u)", varNum);
    }
    else if (IsUpperVector())
    {
        assert(relatedInterval !=
nullptr); printf(" (U%02u)", relatedInterval->varNum); } printf(" %s", varTypeName(registerType)); if (isInternal) { printf(" (INTERNAL)"); } if (isSpilled) { printf(" (SPILLED)"); } if (isSplit) { printf(" (SPLIT)"); } if (isStructField) { printf(" (field)"); } if (isPromotedStruct) { printf(" (promoted struct)"); } if (hasConflictingDefUse) { printf(" (def-use conflict)"); } if (hasInterferingUses) { printf(" (interfering uses)"); } if (isSpecialPutArg) { printf(" (specialPutArg)"); } if (isConstant) { printf(" (constant)"); } if (isWriteThru) { printf(" (writeThru)"); } printf(" RefPositions {"); for (RefPosition* refPosition = this->firstRefPosition; refPosition != nullptr; refPosition = refPosition->nextRefPosition) { printf("#%u@%u", refPosition->rpNum, refPosition->nodeLocation); if (refPosition->nextRefPosition) { printf(" "); } } printf("}"); // this is not used (yet?) // printf(" SpillOffset %d", this->spillOffset); printf(" physReg:%s", getRegName(physReg)); printf(" Preferences="); dumpRegMask(this->registerPreferences); if (relatedInterval) { printf(" RelatedInterval "); relatedInterval->microDump(); } printf("\n"); } // print out very concise representation void Interval::tinyDump() { printf("<Ivl:%u", intervalIndex); if (isLocalVar) { printf(" V%02u", varNum); } else if (IsUpperVector()) { assert(relatedInterval != nullptr); printf(" (U%02u)", relatedInterval->varNum); } else if (isInternal) { printf(" internal"); } printf("> "); } // print out extremely concise representation void Interval::microDump() { if (isLocalVar) { printf("<V%02u/L%u>", varNum, intervalIndex); return; } else if (IsUpperVector()) { assert(relatedInterval != nullptr); printf(" (U%02u)", relatedInterval->varNum); } char intervalTypeChar = 'I'; if (isInternal) { intervalTypeChar = 'T'; } printf("<%c%u>", intervalTypeChar, intervalIndex); } void RegRecord::tinyDump() { printf("<Reg:%-3s> ", getRegName(regNum)); } void LinearScan::dumpDefList() { if (!VERBOSE) { return; } JITDUMP("DefList: { "); bool first = true; for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end; listNode = listNode->Next()) { GenTree* node = listNode->treeNode; JITDUMP("%sN%03u.t%d. %s", first ? 
"" : "; ", node->gtSeqNum, node->gtTreeID, GenTree::OpName(node->OperGet())); first = false; } JITDUMP(" }\n"); } void LinearScan::lsraDumpIntervals(const char* msg) { printf("\nLinear scan intervals %s:\n", msg); for (Interval& interval : intervals) { // only dump something if it has references // if (interval->firstRefPosition) interval.dump(); } printf("\n"); } // Dumps a tree node as a destination or source operand, with the style // of dump dependent on the mode void LinearScan::lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength) { const char* lastUseChar = ""; if (tree->OperIsScalarLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0)) { lastUseChar = "*"; } switch (mode) { case LinearScan::LSRA_DUMP_PRE: case LinearScan::LSRA_DUMP_REFPOS: _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtTreeID, lastUseChar); break; case LinearScan::LSRA_DUMP_POST: { Compiler* compiler = JitTls::GetCompiler(); if (!tree->gtHasReg(compiler)) { _snprintf_s(operandString, operandStringLength, operandStringLength, "STK%s", lastUseChar); } else { int charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s", getRegName(tree->GetRegNum()), lastUseChar); operandString += charCount; operandStringLength -= charCount; if (tree->IsMultiRegNode()) { unsigned regCount = tree->GetMultiRegCount(compiler); for (unsigned regIndex = 1; regIndex < regCount; regIndex++) { charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, ",%s%s", getRegName(tree->GetRegByIndex(regIndex)), lastUseChar); operandString += charCount; operandStringLength -= charCount; } } } } break; default: printf("ERROR: INVALID TUPLE DUMP MODE\n"); break; } } void LinearScan::lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest) { Compiler* compiler = JitTls::GetCompiler(); const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1; char operandString[operandStringLength]; const char* emptyDestOperand = " "; char spillChar = ' '; if (mode == LinearScan::LSRA_DUMP_POST) { if ((tree->gtFlags & GTF_SPILL) != 0) { spillChar = 'S'; } if (!hasDest && tree->gtHasReg(compiler)) { // A node can define a register, but not produce a value for a parent to consume, // i.e. in the "localDefUse" case. // There used to be an assert here that we wouldn't spill such a node. // However, we can have unused lclVars that wind up being the node at which // it is spilled. This probably indicates a bug, but we don't realy want to // assert during a dump. if (spillChar == 'S') { spillChar = '$'; } else { spillChar = '*'; } hasDest = true; } } printf("%c N%03u. 
           spillChar, tree->gtSeqNum);

    LclVarDsc* varDsc = nullptr;
    unsigned   varNum = UINT_MAX;
    if (tree->IsLocal())
    {
        varNum = tree->AsLclVarCommon()->GetLclNum();
        varDsc = compiler->lvaGetDesc(varNum);
        if (varDsc->lvLRACandidate)
        {
            hasDest = false;
        }
    }
    if (hasDest)
    {
        if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
        {
            assert(tree->gtHasReg(compiler));
        }
        lsraGetOperandString(tree, mode, operandString, operandStringLength);
        printf("%-15s =", operandString);
    }
    else
    {
        printf("%-15s ", emptyDestOperand);
    }
    if (varDsc != nullptr)
    {
        if (varDsc->lvLRACandidate)
        {
            if (mode == LSRA_DUMP_REFPOS)
            {
                printf(" V%02u(L%d)", varNum, getIntervalForLocalVar(varDsc->lvVarIndex)->intervalIndex);
            }
            else
            {
                lsraGetOperandString(tree, mode, operandString, operandStringLength);
                printf(" V%02u(%s)", varNum, operandString);
                if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
                {
                    printf("R");
                }
            }
        }
        else
        {
            printf(" V%02u MEM", varNum);
        }
    }
    else if (tree->OperIs(GT_ASG))
    {
        assert(!tree->gtHasReg(compiler));
        printf(" asg%s ", GenTree::OpName(tree->OperGet()));
    }
    else
    {
        compiler->gtDispNodeName(tree);
        if (tree->OperKind() & GTK_LEAF)
        {
            compiler->gtDispLeaf(tree, nullptr);
        }
    }
}

//------------------------------------------------------------------------
// DumpOperandDefs: dumps the registers defined by a node.
//
// Arguments:
//    operand - The operand whose register definitions (if any) to dump.
//
// Returns:
//    None.
//
void LinearScan::DumpOperandDefs(
    GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength)
{
    assert(operand != nullptr);
    assert(operandString != nullptr);
    if (operand->OperIs(GT_ARGPLACE))
    {
        return;
    }
    int dstCount = ComputeOperandDstCount(operand);

    if (dstCount != 0)
    {
        // This operand directly produces registers; print it.
        if (!first)
        {
            printf(",");
        }
        lsraGetOperandString(operand, mode, operandString, operandStringLength);
        printf("%s", operandString);
        first = false;
    }
    else if (operand->isContained())
    {
        // This is a contained node. Dump the defs produced by its operands.
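        // (For example, a contained GT_LEA defines no register of its own, but its base and
        // index operands may; the recursion below surfaces those defs.)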
for (GenTree* op : operand->Operands()) { DumpOperandDefs(op, first, mode, operandString, operandStringLength); } } } void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) { BasicBlock* block; LsraLocation currentLoc = 1; // 0 is the entry const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1; char operandString[operandStringLength]; // currentRefPosition is not used for LSRA_DUMP_PRE // We keep separate iterators for defs, so that we can print them // on the lhs of the dump RefPositionIterator refPosIterator = refPositions.begin(); RefPosition* currentRefPosition = &refPosIterator; switch (mode) { case LSRA_DUMP_PRE: printf("TUPLE STYLE DUMP BEFORE LSRA\n"); break; case LSRA_DUMP_REFPOS: printf("TUPLE STYLE DUMP WITH REF POSITIONS\n"); break; case LSRA_DUMP_POST: printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n"); break; default: printf("ERROR: INVALID TUPLE DUMP MODE\n"); return; } if (mode != LSRA_DUMP_PRE) { printf("Incoming Parameters: "); for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB; ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = currentRefPosition->getInterval(); assert(interval != nullptr && interval->isLocalVar); printf(" V%02d", interval->varNum); if (mode == LSRA_DUMP_POST) { regNumber reg; if (currentRefPosition->registerAssignment == RBM_NONE) { reg = REG_STK; } else { reg = currentRefPosition->assignedReg(); } const LclVarDsc* varDsc = compiler->lvaGetDesc(interval->varNum); printf("("); regNumber assignedReg = varDsc->GetRegNum(); regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->GetArgReg() : REG_STK; assert(reg == assignedReg || varDsc->lvRegister == false); if (reg != argReg) { printf(getRegName(argReg)); printf("=>"); } printf("%s)", getRegName(reg)); } } printf("\n"); } for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock()) { currentLoc += 2; if (mode == LSRA_DUMP_REFPOS) { bool printedBlockHeader = false; // We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef || (currentRefPosition->refType == RefTypeBB && !printedBlockHeader)); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = nullptr; if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); } switch (currentRefPosition->refType) { case RefTypeExpUse: assert(interval != nullptr); assert(interval->isLocalVar); printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum); break; case RefTypeDummyDef: assert(interval != nullptr); assert(interval->isLocalVar); printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum); break; case RefTypeBB: block->dspBlockHeader(compiler); printedBlockHeader = true; printf("=====\n"); break; default: printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); break; } } } else { block->dspBlockHeader(compiler); printf("=====\n"); } if (enregisterLocalVars && mode == LSRA_DUMP_POST && block != compiler->fgFirstBB && block->bbNum <= bbNumMaxBeforeResolution) { printf("Predecessor for variable locations: " FMT_BB "\n", blockInfo[block->bbNum].predBBNum); dumpInVarToRegMap(block); } if (block->bbNum > bbNumMaxBeforeResolution) { SplitEdgeInfo splitEdgeInfo; splitBBNumToTargetBBNumMap->Lookup(block->bbNum, &splitEdgeInfo); assert(splitEdgeInfo.toBBNum <= 
bbNumMaxBeforeResolution); assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution); printf("New block introduced for resolution from " FMT_BB " to " FMT_BB "\n", splitEdgeInfo.fromBBNum, splitEdgeInfo.toBBNum); } for (GenTree* node : LIR::AsRange(block)) { GenTree* tree = node; int produce = tree->IsValue() ? ComputeOperandDstCount(tree) : 0; int consume = ComputeAvailableSrcCount(tree); lsraDispNode(tree, mode, produce != 0 && mode != LSRA_DUMP_REFPOS); if (mode != LSRA_DUMP_REFPOS) { if (consume > 0) { printf("; "); bool first = true; for (GenTree* operand : tree->Operands()) { DumpOperandDefs(operand, first, mode, operandString, operandStringLength); } } } else { // Print each RefPosition on a new line, but // printing all the kills for each node on a single line // and combining the fixed regs with their associated def or use bool killPrinted = false; RefPosition* lastFixedRegRefPos = nullptr; for (; refPosIterator != refPositions.end() && (currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg || currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) && (currentRefPosition->nodeLocation == tree->gtSeqNum || currentRefPosition->nodeLocation == tree->gtSeqNum + 1); ++refPosIterator, currentRefPosition = &refPosIterator) { Interval* interval = nullptr; if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); } switch (currentRefPosition->refType) { case RefTypeUse: if (currentRefPosition->IsPhysRegRef()) { printf("\n Use:R%d(#%d)", currentRefPosition->getReg()->regNum, currentRefPosition->rpNum); } else { assert(interval != nullptr); printf("\n Use:"); interval->microDump(); printf("(#%d)", currentRefPosition->rpNum); if (currentRefPosition->isFixedRegRef && !interval->isInternal) { assert(genMaxOneBit(currentRefPosition->registerAssignment)); assert(lastFixedRegRefPos != nullptr); printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg()), lastFixedRegRefPos->rpNum); lastFixedRegRefPos = nullptr; } if (currentRefPosition->isLocalDefUse) { printf(" LocalDefUse"); } if (currentRefPosition->lastUse) { printf(" *"); } } break; case RefTypeDef: { // Print each def on a new line assert(interval != nullptr); printf("\n Def:"); interval->microDump(); printf("(#%d)", currentRefPosition->rpNum); if (currentRefPosition->isFixedRegRef) { assert(genMaxOneBit(currentRefPosition->registerAssignment)); printf(" %s", getRegName(currentRefPosition->assignedReg())); } if (currentRefPosition->isLocalDefUse) { printf(" LocalDefUse"); } if (currentRefPosition->lastUse) { printf(" *"); } if (interval->relatedInterval != nullptr) { printf(" Pref:"); interval->relatedInterval->microDump(); } } break; case RefTypeKill: if (!killPrinted) { printf("\n Kill: "); killPrinted = true; } printf(getRegName(currentRefPosition->assignedReg())); printf(" "); break; case RefTypeFixedReg: lastFixedRegRefPos = currentRefPosition; break; default: printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); break; } } } printf("\n"); } if (enregisterLocalVars && mode == LSRA_DUMP_POST) { dumpOutVarToRegMap(block); } printf("\n"); } printf("\n\n"); } void LinearScan::dumpLsraAllocationEvent( LsraDumpEvent event, Interval* interval, regNumber reg, BasicBlock* currentBlock, RegisterScore registerScore) { if (!(VERBOSE)) { return; } if ((interval != nullptr) && (reg != REG_NA) && (reg != REG_STK)) { registersToDump |= getRegMask(reg, interval->registerType); dumpRegRecordTitleIfNeeded(); } switch 
(event) { // Conflicting def/use case LSRA_EVENT_DEFUSE_CONFLICT: dumpRefPositionShort(activeRefPosition, currentBlock); printf("DUconflict "); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE1: printf(indentFormat, " Case #1 use defRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE2: printf(indentFormat, " Case #2 use useRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE3: printf(indentFormat, " Case #3 use useRegAssignment"); dumpRegRecords(); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE4: printf(indentFormat, " Case #4 use defRegAssignment"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE5: printf(indentFormat, " Case #5 set def to all regs"); dumpRegRecords(); break; case LSRA_EVENT_DEFUSE_CASE6: printf(indentFormat, " Case #6 need a copy"); dumpRegRecords(); if (interval == nullptr) { printf(indentFormat, " NULL interval"); dumpRegRecords(); } else if (interval->firstRefPosition->multiRegIdx != 0) { printf(indentFormat, " (multiReg)"); dumpRegRecords(); } break; case LSRA_EVENT_SPILL: dumpRefPositionShort(activeRefPosition, currentBlock); assert(interval != nullptr && interval->assignedReg != nullptr); printf("Spill %-4s ", getRegName(interval->assignedReg->regNum)); dumpRegRecords(); break; // Restoring the previous register case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL: case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL: assert(interval != nullptr); if ((activeRefPosition == nullptr) || (activeRefPosition->refType == RefTypeBB)) { printf(emptyRefPositionFormat, ""); } else { dumpRefPositionShort(activeRefPosition, currentBlock); } printf((event == LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL) ? "Restr %-4s " : "SRstr %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_DONE_KILL_GC_REFS: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Done "); break; case LSRA_EVENT_NO_GC_KILLS: dumpRefPositionShort(activeRefPosition, currentBlock); printf("None "); break; // Block boundaries case LSRA_EVENT_START_BB: // The RefTypeBB comes after the RefTypeDummyDefs associated with that block, // so we may have a RefTypeDummyDef at the time we dump this event. // In that case we'll have another "EVENT" associated with it, so we need to // print the full line now. 
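            // (Illustrative: if a dummy def of some V03 is still pending at this boundary,
            // dumpNewBlock() below prints its "DDefs" row rather than the BB header row; the BB
            // row is printed once the RefTypeBB itself becomes the active RefPosition.)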
if (activeRefPosition->refType != RefTypeBB) { dumpNewBlock(currentBlock, activeRefPosition->nodeLocation); dumpRegRecords(); } else { dumpRefPositionShort(activeRefPosition, currentBlock); } break; // Allocation decisions case LSRA_EVENT_NEEDS_NEW_REG: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Free %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_ZERO_REF: assert(interval != nullptr && interval->isLocalVar); dumpRefPositionShort(activeRefPosition, currentBlock); printf("NoRef "); dumpRegRecords(); break; case LSRA_EVENT_FIXED_REG: case LSRA_EVENT_EXP_USE: case LSRA_EVENT_KEPT_ALLOCATION: dumpRefPositionShort(activeRefPosition, currentBlock); printf("Keep %-4s ", getRegName(reg)); break; case LSRA_EVENT_COPY_REG: assert(interval != nullptr && interval->recentRefPosition != nullptr); dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Copy %-4s ", getRegName(reg)); } else { printf("%-5s(C) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_MOVE_REG: assert(interval != nullptr && interval->recentRefPosition != nullptr); dumpRefPositionShort(activeRefPosition, currentBlock); printf("Move %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_ALLOC_REG: dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Alloc %-4s ", getRegName(reg)); } else { printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_REUSE_REG: dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { printf("Reuse %-4s ", getRegName(reg)); } else { printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg)); } break; case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED: assert(interval != nullptr && interval->isLocalVar); dumpRefPositionShort(activeRefPosition, currentBlock); printf("LoRef "); break; case LSRA_EVENT_NO_REG_ALLOCATED: dumpRefPositionShort(activeRefPosition, currentBlock); printf("NoReg "); break; case LSRA_EVENT_RELOAD: dumpRefPositionShort(activeRefPosition, currentBlock); printf("ReLod %-4s ", getRegName(reg)); dumpRegRecords(); break; case LSRA_EVENT_SPECIAL_PUTARG: dumpRefPositionShort(activeRefPosition, currentBlock); printf("PtArg %-4s ", getRegName(reg)); break; case LSRA_EVENT_UPPER_VECTOR_SAVE: dumpRefPositionShort(activeRefPosition, currentBlock); printf("UVSav %-4s ", getRegName(reg)); break; case LSRA_EVENT_UPPER_VECTOR_RESTORE: dumpRefPositionShort(activeRefPosition, currentBlock); printf("UVRes %-4s ", getRegName(reg)); break; // We currently don't dump anything for these events. case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE: case LSRA_EVENT_SPILL_EXTENDED_LIFETIME: case LSRA_EVENT_END_BB: case LSRA_EVENT_FREE_REGS: case LSRA_EVENT_INCREMENT_RANGE_END: case LSRA_EVENT_LAST_USE: case LSRA_EVENT_LAST_USE_DELAYED: break; default: printf("????? %-4s ", getRegName(reg)); dumpRegRecords(); break; } } //------------------------------------------------------------------------ // dumpRegRecordHeader: Dump the header for a column-based dump of the register state. // // Arguments: // None. // // Return Value: // None. // // Assumptions: // Reg names fit in 4 characters (minimum width of the columns) // // Notes: // In order to make the table as dense as possible (for ease of reading the dumps), // we determine the minimum regColumnWidth width required to represent: // regs, by name (e.g. 
eax or xmm0) - this is fixed at 4 characters.
//    intervals, as Vnn for lclVar intervals, or as I<num> for other intervals.
// The table is indented by the amount needed for dumpRefPositionShort, which is
// captured in shortRefPositionDumpWidth.
//
void LinearScan::dumpRegRecordHeader()
{
    printf("The following table has one or more rows for each RefPosition that is handled during allocation.\n"
           "The first column provides the basic information about the RefPosition, with its type (e.g. Def,\n"
           "Use, Fixd) followed by a '*' if it is a last use, and a 'D' if it is delayRegFree, and then the\n"
           "action taken during allocation (e.g. Alloc a new register, or Keep an existing one).\n"
           "The subsequent columns show the Interval occupying each register, if any, followed by 'a' if it is\n"
           "active, a 'p' if it is a large vector that has been partially spilled, and 'i' if it is inactive.\n"
           "Columns are only printed up to the last modified register, which may increase during allocation,\n"
           "in which case additional columns will appear. \n"
           "Registers which are not marked modified have ---- in their column.\n\n");

    // First, determine the width of each register column (which holds a reg name in the
    // header, and an interval name in each subsequent row).
    int intervalNumberWidth = (int)log10((double)intervals.size()) + 1;
    // The regColumnWidth includes the identifying character (I or V) and an 'i', 'p' or 'a' (inactive,
    // partially-spilled or active)
    regColumnWidth = intervalNumberWidth + 2;
    if (regColumnWidth < 4)
    {
        regColumnWidth = 4;
    }
    sprintf_s(intervalNameFormat, MAX_FORMAT_CHARS, "%%c%%-%dd", regColumnWidth - 2);
    sprintf_s(regNameFormat, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth);

    // Next, determine the width of the short RefPosition (see dumpRefPositionShort()).
    // This is in the form:
    //   nnn.#mmm NAME TYPEld
    // Where:
    //   nnn is the Location, right-justified to the width needed for the highest location.
    //   mmm is the RefPosition rpNum, left-justified to the width needed for the highest rpNum.
    //   NAME is dumped by dumpReferentName(), and is "regColumnWidth".
    //   TYPE is RefTypeNameShort, and is 4 characters
    //   l is either '*' (if a last use) or ' ' (otherwise)
    //   d is either 'D' (if a delayed use) or ' ' (otherwise)

    maxNodeLocation = (maxNodeLocation == 0)
                          ? 1
                          : maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes
    assert(maxNodeLocation >= 1);
    assert(refPositions.size() >= 1);
    int nodeLocationWidth         = (int)log10((double)maxNodeLocation) + 1;
    int refPositionWidth          = (int)log10((double)refPositions.size()) + 1;
    int refTypeInfoWidth          = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
    int locationAndRPNumWidth     = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
    int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth;
    sprintf_s(shortRefPositionFormat, MAX_FORMAT_CHARS, "%%%dd.#%%-%dd ", nodeLocationWidth, refPositionWidth);
    sprintf_s(emptyRefPositionFormat, MAX_FORMAT_CHARS, "%%-%ds", shortRefPositionDumpWidth);

    // The width of the "allocation info"
    //  - an 8-character allocation decision
    //  - a space
    //  - a 4-character register
    //  - a space
    int allocationInfoWidth = 8 + 1 + 4 + 1;

    // Next, determine the width of the legend for each row.
This includes: // - a short RefPosition dump (shortRefPositionDumpWidth), which includes a space // - the allocation info (allocationInfoWidth), which also includes a space regTableIndent = shortRefPositionDumpWidth + allocationInfoWidth; // BBnn printed left-justified in the NAME Typeld and allocationInfo space. int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1; // In the unlikely event that BB numbers overflow the space, we'll simply omit the predBB int predBBNumDumpSpace = regTableIndent - locationAndRPNumWidth - bbNumWidth - 9; // 'BB' + ' PredBB' if (predBBNumDumpSpace < bbNumWidth) { sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd", shortRefPositionDumpWidth - 2); } else { sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd PredBB%%-%dd", bbNumWidth, predBBNumDumpSpace); } if (compiler->shouldDumpASCIITrees()) { columnSeparator = "|"; line = "-"; leftBox = "+"; middleBox = "+"; rightBox = "+"; } else { columnSeparator = "\xe2\x94\x82"; line = "\xe2\x94\x80"; leftBox = "\xe2\x94\x9c"; middleBox = "\xe2\x94\xbc"; rightBox = "\xe2\x94\xa4"; } sprintf_s(indentFormat, MAX_FORMAT_CHARS, "%%-%ds", regTableIndent); // Now, set up the legend format for the RefPosition info sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth + 1, nodeLocationWidth + 1, refPositionWidth + 2, refPositionWidth + 2, regColumnWidth + 1); // Print a "title row" including the legend and the reg names. lastDumpedRegisters = RBM_NONE; dumpRegRecordTitleIfNeeded(); } void LinearScan::dumpRegRecordTitleIfNeeded() { if ((lastDumpedRegisters != registersToDump) || (rowCountSinceLastTitle > MAX_ROWS_BETWEEN_TITLES)) { lastUsedRegNumIndex = 0; int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST; for (int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++) { if ((registersToDump & genRegMask((regNumber)regNumIndex)) != 0) { lastUsedRegNumIndex = regNumIndex; } } dumpRegRecordTitle(); lastDumpedRegisters = registersToDump; } } void LinearScan::dumpRegRecordTitleLines() { for (int i = 0; i < regTableIndent; i++) { printf("%s", line); } for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++) { regNumber regNum = (regNumber)regNumIndex; if (shouldDumpReg(regNum)) { printf("%s", middleBox); for (int i = 0; i < regColumnWidth; i++) { printf("%s", line); } } } printf("%s\n", rightBox); } void LinearScan::dumpRegRecordTitle() { dumpRegRecordTitleLines(); // Print out the legend for the RefPosition info printf(legendFormat, "Loc ", "RP# ", "Name ", "Type Action Reg "); // Print out the register name column headers char columnFormatArray[MAX_FORMAT_CHARS]; sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%s%%-%d.%ds", columnSeparator, regColumnWidth, regColumnWidth); for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++) { regNumber regNum = (regNumber)regNumIndex; if (shouldDumpReg(regNum)) { const char* regName = getRegName(regNum); printf(columnFormatArray, regName); } } printf("%s\n", columnSeparator); rowCountSinceLastTitle = 0; dumpRegRecordTitleLines(); } void LinearScan::dumpRegRecords() { static char columnFormatArray[18]; for (regNumber regNum = REG_FIRST; regNum <= (regNumber)lastUsedRegNumIndex; regNum = REG_NEXT(regNum)) { if (shouldDumpReg(regNum)) { printf("%s", columnSeparator); RegRecord& regRecord = physRegs[regNum]; Interval* interval = regRecord.assignedInterval; if (interval != nullptr) { dumpIntervalName(interval); char activeChar = interval->isActive 
? 'a' : 'i'; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (interval->isPartiallySpilled) { activeChar = 'p'; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE printf("%c", activeChar); } else if ((genRegMask(regNum) & regsBusyUntilKill) != RBM_NONE) { printf(columnFormatArray, "Busy"); } else { sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth); printf(columnFormatArray, ""); } } } printf("%s\n", columnSeparator); rowCountSinceLastTitle++; } void LinearScan::dumpIntervalName(Interval* interval) { if (interval->isLocalVar) { printf(intervalNameFormat, 'V', interval->varNum); } else if (interval->IsUpperVector()) { printf(intervalNameFormat, 'U', interval->relatedInterval->varNum); } else if (interval->isConstant) { printf(intervalNameFormat, 'C', interval->intervalIndex); } else { printf(intervalNameFormat, 'I', interval->intervalIndex); } } void LinearScan::dumpEmptyRefPosition() { printf(emptyRefPositionFormat, ""); } //------------------------------------------------------------------------ // dumpNewBlock: Dump a line for a new block in a column-based dump of the register state. // // Arguments: // currentBlock - the new block to be dumped // void LinearScan::dumpNewBlock(BasicBlock* currentBlock, LsraLocation location) { if (!VERBOSE) { return; } // Always print a title row before a RefTypeBB (except for the first, because we // will already have printed it before the parameters) if ((currentBlock != compiler->fgFirstBB) && (currentBlock != nullptr)) { dumpRegRecordTitle(); } // If the activeRefPosition is a DummyDef, then don't print anything further (printing the // title line makes it clearer that we're "about to" start the next block). if (activeRefPosition->refType == RefTypeDummyDef) { dumpEmptyRefPosition(); printf("DDefs "); printf(regNameFormat, ""); return; } printf(shortRefPositionFormat, location, activeRefPosition->rpNum); if (currentBlock == nullptr) { printf(regNameFormat, "END"); printf(" "); printf(regNameFormat, ""); } else { printf(bbRefPosFormat, currentBlock->bbNum, currentBlock == compiler->fgFirstBB ? 0 : blockInfo[currentBlock->bbNum].predBBNum); } } // Note that the size of this dump is computed in dumpRegRecordHeader(). // void LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock) { static RefPosition* lastPrintedRefPosition = nullptr; if (refPosition == lastPrintedRefPosition) { dumpEmptyRefPosition(); return; } lastPrintedRefPosition = refPosition; if (refPosition->refType == RefTypeBB) { dumpNewBlock(currentBlock, refPosition->nodeLocation); return; } printf(shortRefPositionFormat, refPosition->nodeLocation, refPosition->rpNum); if (refPosition->isIntervalRef()) { Interval* interval = refPosition->getInterval(); dumpIntervalName(interval); char lastUseChar = ' '; char delayChar = ' '; if (refPosition->lastUse) { lastUseChar = '*'; if (refPosition->delayRegFree) { delayChar = 'D'; } } printf(" %s%c%c ", getRefTypeShortName(refPosition->refType), lastUseChar, delayChar); } else if (refPosition->IsPhysRegRef()) { RegRecord* regRecord = refPosition->getReg(); printf(regNameFormat, getRegName(regRecord->regNum)); printf(" %s ", getRefTypeShortName(refPosition->refType)); } else { assert(refPosition->refType == RefTypeKillGCRefs); // There's no interval or reg name associated with this. 
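        // (With no referent, only the location/RP# prefix and the short ref-type name are
        // printed; the NAME column below is deliberately left blank.)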
printf(regNameFormat, " "); printf(" %s ", getRefTypeShortName(refPosition->refType)); } } //------------------------------------------------------------------------ // LinearScan::IsResolutionMove: // Returns true if the given node is a move inserted by LSRA // resolution. // // Arguments: // node - the node to check. // bool LinearScan::IsResolutionMove(GenTree* node) { if (!IsLsraAdded(node)) { return false; } switch (node->OperGet()) { case GT_LCL_VAR: case GT_COPY: return node->IsUnusedValue(); case GT_SWAP: return true; default: return false; } } //------------------------------------------------------------------------ // LinearScan::IsResolutionNode: // Returns true if the given node is either a move inserted by LSRA // resolution or an operand to such a move. // // Arguments: // containingRange - the range that contains the node to check. // node - the node to check. // bool LinearScan::IsResolutionNode(LIR::Range& containingRange, GenTree* node) { for (;;) { if (IsResolutionMove(node)) { return true; } if (!IsLsraAdded(node) || (node->OperGet() != GT_LCL_VAR)) { return false; } LIR::Use use; bool foundUse = containingRange.TryGetUse(node, &use); assert(foundUse); node = use.User(); } } //------------------------------------------------------------------------ // verifyFinalAllocation: Traverse the RefPositions and verify various invariants. // // Arguments: // None. // // Return Value: // None. // // Notes: // If verbose is set, this will also dump a table of the final allocations. void LinearScan::verifyFinalAllocation() { if (VERBOSE) { printf("\nFinal allocation\n"); } // Clear register assignments. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } for (Interval& interval : intervals) { interval.assignedReg = nullptr; interval.physReg = REG_NA; } DBEXEC(VERBOSE, dumpRegRecordTitle()); BasicBlock* currentBlock = nullptr; GenTree* firstBlockEndResolutionNode = nullptr; LsraLocation currentLocation = MinLocation; for (RefPosition& refPosition : refPositions) { RefPosition* currentRefPosition = &refPosition; Interval* interval = nullptr; RegRecord* regRecord = nullptr; regNumber regNum = REG_NA; activeRefPosition = currentRefPosition; if (currentRefPosition->refType != RefTypeBB) { if (currentRefPosition->IsPhysRegRef()) { regRecord = currentRefPosition->getReg(); regRecord->recentRefPosition = currentRefPosition; regNum = regRecord->regNum; } else if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); interval->recentRefPosition = currentRefPosition; if (currentRefPosition->registerAssignment != RBM_NONE) { if (!genMaxOneBit(currentRefPosition->registerAssignment)) { assert(currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); } else { regNum = currentRefPosition->assignedReg(); regRecord = getRegisterRecord(regNum); } } } } LsraLocation newLocation = currentRefPosition->nodeLocation; currentLocation = newLocation; switch (currentRefPosition->refType) { case RefTypeBB: { if (currentBlock == nullptr) { currentBlock = startBlockSequence(); } else { // Verify the resolution moves at the end of the previous block. for (GenTree* node = firstBlockEndResolutionNode; node != nullptr; node = node->gtNext) { assert(enregisterLocalVars); // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. 
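                        // (For example, the GT_LCL_VAR operands feeding a GT_COPY or GT_SWAP are
                        // resolution nodes but not themselves moves; IsResolutionNode() above walks
                        // from such an operand to the move that consumes it.)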
if (IsResolutionMove(node)) { verifyResolutionMove(node, currentLocation); } } // Validate the locations at the end of the previous block. if (enregisterLocalVars) { VarToRegMap outVarToRegMap = outVarToRegMaps[currentBlock->bbNum]; VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(outVarToRegMap, varIndex); interval = getIntervalForLocalVar(varIndex); if (interval->physReg != regNum) { assert(regNum == REG_STK); assert((interval->physReg == REG_NA) || interval->isWriteThru); } interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } } // Clear register assignments. for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } // Now, record the locations at the beginning of this block. currentBlock = moveToNextBlock(); } if (currentBlock != nullptr) { if (enregisterLocalVars) { VarToRegMap inVarToRegMap = inVarToRegMaps[currentBlock->bbNum]; VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(inVarToRegMap, varIndex); interval = getIntervalForLocalVar(varIndex); interval->physReg = regNum; interval->assignedReg = &(physRegs[regNum]); interval->isActive = true; physRegs[regNum].assignedInterval = interval; } } if (VERBOSE) { dumpRefPositionShort(currentRefPosition, currentBlock); dumpRegRecords(); } // Finally, handle the resolution moves, if any, at the beginning of the next block. firstBlockEndResolutionNode = nullptr; bool foundNonResolutionNode = false; LIR::Range& currentBlockRange = LIR::AsRange(currentBlock); for (GenTree* node : currentBlockRange) { if (IsResolutionNode(currentBlockRange, node)) { assert(enregisterLocalVars); if (foundNonResolutionNode) { firstBlockEndResolutionNode = node; break; } else if (IsResolutionMove(node)) { // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. 
verifyResolutionMove(node, currentLocation); } } else { foundNonResolutionNode = true; } } } } break; case RefTypeKill: assert(regRecord != nullptr); assert(regRecord->assignedInterval == nullptr); dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); break; case RefTypeFixedReg: assert(regRecord != nullptr); dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); break; case RefTypeUpperVectorSave: dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_SAVE, nullptr, REG_NA, currentBlock); break; case RefTypeUpperVectorRestore: dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_RESTORE, nullptr, REG_NA, currentBlock); break; case RefTypeDef: case RefTypeUse: case RefTypeParamDef: case RefTypeZeroInit: assert(interval != nullptr); if (interval->isSpecialPutArg) { dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum); break; } if (currentRefPosition->reload) { interval->isActive = true; assert(regNum != REG_NA); interval->physReg = regNum; interval->assignedReg = regRecord; regRecord->assignedInterval = interval; dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock); } if (regNum == REG_NA) { // If this interval is still assigned to a register if (interval->physReg != REG_NA) { // then unassign it if no new register was assigned to the RefTypeDef if (RefTypeIsDef(currentRefPosition->refType)) { assert(interval->assignedReg != nullptr); if (interval->assignedReg->assignedInterval == interval) { interval->assignedReg->assignedInterval = nullptr; } interval->physReg = REG_NA; interval->assignedReg = nullptr; } } dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval); } else if (RefTypeIsDef(currentRefPosition->refType)) { interval->isActive = true; if (VERBOSE) { if (interval->isConstant && (currentRefPosition->treeNode != nullptr) && currentRefPosition->treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock); } else { dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock); } } } else if (currentRefPosition->copyReg) { dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock); } else if (currentRefPosition->moveReg) { assert(interval->assignedReg != nullptr); interval->assignedReg->assignedInterval = nullptr; interval->physReg = regNum; interval->assignedReg = regRecord; regRecord->assignedInterval = interval; if (VERBOSE) { dumpEmptyRefPosition(); printf("Move %-4s ", getRegName(regRecord->regNum)); } } else { dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); } if (currentRefPosition->lastUse || (currentRefPosition->spillAfter && !currentRefPosition->writeThru)) { interval->isActive = false; } if (regNum != REG_NA) { if (currentRefPosition->spillAfter) { if (VERBOSE) { // If refPos is marked as copyReg, then the reg that is spilled // is the homeReg of the interval not the reg currently assigned // to refPos. regNumber spillReg = regNum; if (currentRefPosition->copyReg) { assert(interval != nullptr); spillReg = interval->physReg; } dumpRegRecords(); dumpEmptyRefPosition(); if (currentRefPosition->writeThru) { printf("WThru %-4s ", getRegName(spillReg)); } else { printf("Spill %-4s ", getRegName(spillReg)); } } } else if (currentRefPosition->copyReg) { regRecord->assignedInterval = interval; } else { if (RefTypeIsDef(currentRefPosition->refType)) { // Interval was assigned to a different register. 
// Clear the assigned interval of current register.
                        if (interval->physReg != REG_NA && interval->physReg != regNum)
                        {
                            interval->assignedReg->assignedInterval = nullptr;
                        }
                    }
                    interval->physReg           = regNum;
                    interval->assignedReg       = regRecord;
                    regRecord->assignedInterval = interval;
                }
            }
            break;

        case RefTypeKillGCRefs:
            // No action to take.
            // However, we will assert that, at resolution time, no registers contain GC refs.
            {
                DBEXEC(VERBOSE, printf(" "));
                regMaskTP candidateRegs = currentRefPosition->registerAssignment;
                while (candidateRegs != RBM_NONE)
                {
                    regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
                    candidateRegs &= ~nextRegBit;
                    regNumber  nextReg          = genRegNumFromMask(nextRegBit);
                    RegRecord* regRecord        = getRegisterRecord(nextReg);
                    Interval*  assignedInterval = regRecord->assignedInterval;
                    assert(assignedInterval == nullptr || !varTypeIsGC(assignedInterval->registerType));
                }
            }
            break;

        case RefTypeExpUse:
        case RefTypeDummyDef:
            // Do nothing; these will be handled by the RefTypeBB.
            DBEXEC(VERBOSE, dumpRefPositionShort(currentRefPosition, currentBlock));
            DBEXEC(VERBOSE, printf(" "));
            break;

        case RefTypeInvalid:
            // for these 'currentRefPosition->refType' values, no action to take
            break;
    }

    if (currentRefPosition->refType != RefTypeBB)
    {
        DBEXEC(VERBOSE, dumpRegRecords());
        if (interval != nullptr)
        {
            if (currentRefPosition->copyReg)
            {
                assert(interval->physReg != regNum);
                regRecord->assignedInterval = nullptr;
                assert(interval->assignedReg != nullptr);
                regRecord = interval->assignedReg;
            }
            if (currentRefPosition->spillAfter || currentRefPosition->lastUse)
            {
                assert(!currentRefPosition->spillAfter || currentRefPosition->IsActualRef());

                if (RefTypeIsDef(currentRefPosition->refType))
                {
                    // If an interval got assigned to a different register (while the different
                    // register got spilled), then clear the assigned interval of current register.
                    if (interval->physReg != REG_NA && interval->physReg != regNum)
                    {
                        interval->assignedReg->assignedInterval = nullptr;
                    }
                }

                interval->physReg     = REG_NA;
                interval->assignedReg = nullptr;

                // regRecord could be null if the RefPosition does not require a register.
                if (regRecord != nullptr)
                {
                    regRecord->assignedInterval = nullptr;
                }
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                else if (interval->isUpperVector && !currentRefPosition->RegOptional())
                {
                    // These only require a register if they are not RegOptional, and their lclVar
                    // interval is living in a register and not already partially spilled.
                    if ((currentRefPosition->refType == RefTypeUpperVectorSave) ||
                        (currentRefPosition->refType == RefTypeUpperVectorRestore))
                    {
                        Interval* lclVarInterval = interval->relatedInterval;
                        assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled);
                    }
                }
#endif
                else
                {
                    assert(currentRefPosition->RegOptional());
                }
            }
        }
    }
}

// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
for (BasicBlock* const currentBlock : compiler->Blocks())
{
    if (currentBlock->bbNum > bbNumMaxBeforeResolution)
    {
        // If we haven't enregistered any lclVars, we have no resolution blocks.
        assert(enregisterLocalVars);

        if (VERBOSE)
        {
            dumpRegRecordTitle();
            printf(shortRefPositionFormat, 0, 0);
            assert(currentBlock->bbPreds != nullptr && currentBlock->bbPreds->getBlock() != nullptr);
            printf(bbRefPosFormat, currentBlock->bbNum, currentBlock->bbPreds->getBlock()->bbNum);
            dumpRegRecords();
        }

        // Clear register assignments.
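        // (Each resolution block is verified in isolation: the physical register state is
        // reset below, re-seeded from the block's incoming VarToRegMap, and the block's
        // moves are then replayed and checked one by one.)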
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* physRegRecord = getRegisterRecord(reg); physRegRecord->assignedInterval = nullptr; } // Set the incoming register assignments VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum); VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(inVarToRegMap, varIndex); Interval* interval = getIntervalForLocalVar(varIndex); interval->physReg = regNum; interval->assignedReg = &(physRegs[regNum]); interval->isActive = true; physRegs[regNum].assignedInterval = interval; } // Verify the moves in this block LIR::Range& currentBlockRange = LIR::AsRange(currentBlock); for (GenTree* node : currentBlockRange) { assert(IsResolutionNode(currentBlockRange, node)); if (IsResolutionMove(node)) { // Only verify nodes that are actually moves; don't bother with the nodes that are // operands to moves. verifyResolutionMove(node, currentLocation); } } // Verify the outgoing register assignments { VarToRegMap outVarToRegMap = getOutVarToRegMap(currentBlock->bbNum); VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { if (localVarIntervals[varIndex] == nullptr) { assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate); continue; } regNumber regNum = getVarReg(outVarToRegMap, varIndex); Interval* interval = getIntervalForLocalVar(varIndex); // Either the register assignments match, or the outgoing assignment is on the stack // and this is a write-thru interval. assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK) || (interval->isWriteThru && regNum == REG_STK)); interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } } } } DBEXEC(VERBOSE, printf("\n")); } //------------------------------------------------------------------------ // verifyResolutionMove: Verify a resolution statement. Called by verifyFinalAllocation() // // Arguments: // resolutionMove - A GenTree* that must be a resolution move. // currentLocation - The LsraLocation of the most recent RefPosition that has been verified. // // Return Value: // None. // // Notes: // If verbose is set, this will also dump the moves into the table of final allocations. 
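//
//    Example (illustrative, register names hypothetical): for a GT_SWAP of two lclVars
//    currently in registers r1 and r2, the two intervals simply exchange their
//    physReg/assignedReg fields, and a dump row is emitted for each interval.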
void LinearScan::verifyResolutionMove(GenTree* resolutionMove, LsraLocation currentLocation) { GenTree* dst = resolutionMove; assert(IsResolutionMove(dst)); if (dst->OperGet() == GT_SWAP) { GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon(); GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon(); regNumber leftRegNum = left->GetRegNum(); regNumber rightRegNum = right->GetRegNum(); LclVarDsc* leftVarDsc = compiler->lvaGetDesc(left); LclVarDsc* rightVarDsc = compiler->lvaGetDesc(right); Interval* leftInterval = getIntervalForLocalVar(leftVarDsc->lvVarIndex); Interval* rightInterval = getIntervalForLocalVar(rightVarDsc->lvVarIndex); assert(leftInterval->physReg == leftRegNum && rightInterval->physReg == rightRegNum); leftInterval->physReg = rightRegNum; rightInterval->physReg = leftRegNum; leftInterval->assignedReg = &physRegs[rightRegNum]; rightInterval->assignedReg = &physRegs[leftRegNum]; physRegs[rightRegNum].assignedInterval = leftInterval; physRegs[leftRegNum].assignedInterval = rightInterval; if (VERBOSE) { printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(leftInterval); printf(" Swap "); printf(" %-4s ", getRegName(rightRegNum)); dumpRegRecords(); printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(rightInterval); printf(" \" "); printf(" %-4s ", getRegName(leftRegNum)); dumpRegRecords(); } return; } regNumber dstRegNum = dst->GetRegNum(); regNumber srcRegNum; GenTreeLclVarCommon* lcl; if (dst->OperGet() == GT_COPY) { lcl = dst->gtGetOp1()->AsLclVarCommon(); srcRegNum = lcl->GetRegNum(); } else { lcl = dst->AsLclVarCommon(); if ((lcl->gtFlags & GTF_SPILLED) != 0) { srcRegNum = REG_STK; } else { assert((lcl->gtFlags & GTF_SPILL) != 0); srcRegNum = dstRegNum; dstRegNum = REG_STK; } } Interval* interval = getIntervalForLocalVarNode(lcl); assert(interval->physReg == srcRegNum || (srcRegNum == REG_STK && interval->physReg == REG_NA)); if (srcRegNum != REG_STK) { physRegs[srcRegNum].assignedInterval = nullptr; } if (dstRegNum != REG_STK) { interval->physReg = dstRegNum; interval->assignedReg = &(physRegs[dstRegNum]); physRegs[dstRegNum].assignedInterval = interval; interval->isActive = true; } else { interval->physReg = REG_NA; interval->assignedReg = nullptr; interval->isActive = false; } if (VERBOSE) { printf(shortRefPositionFormat, currentLocation, 0); dumpIntervalName(interval); printf(" Move "); printf(" %-4s ", getRegName(dstRegNum)); dumpRegRecords(); } } #endif // DEBUG LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan) { this->linearScan = linearScan; #ifdef DEBUG mappingTable = new ScoreMappingTable(linearScan->compiler->getAllocator(CMK_LSRA)); #define REG_SEL_DEF(stat, value, shortname, orderSeqId) \ mappingTable->Set(stat, &LinearScan::RegisterSelection::try_##stat); #include "lsra_score.h" #undef REG_SEL_DEF LPCWSTR ordering = JitConfig.JitLsraOrdering(); if (ordering == nullptr) { ordering = W("ABCDEFGHIJKLMNOPQ"); } for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++) { // Make sure we do not set repeated entries assert(RegSelectionOrder[orderId] == NONE); switch (ordering[orderId]) { #define REG_SEL_DEF(enum_name, value, shortname, orderSeqId) \ case orderSeqId: \ RegSelectionOrder[orderId] = enum_name; \ break; #include "lsra_score.h" #undef REG_SEL_DEF default: assert(!"Invalid lsraOrdering value."); } } #endif // DEBUG } // ---------------------------------------------------------- // reset: Resets the values of all the fields used for register selection. 
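// Note: invoked at the top of select() for each RefPosition, before any of the
// try_* heuristics run.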
// void LinearScan::RegisterSelection::reset(Interval* interval, RefPosition* refPos) { currentInterval = interval; refPosition = refPos; score = 0; regType = linearScan->getRegisterType(currentInterval, refPosition); currentLocation = refPosition->nodeLocation; nextRefPos = refPosition->nextRefPosition; candidates = refPosition->registerAssignment; preferences = currentInterval->registerPreferences; // This is not actually a preference, it's merely to track the lclVar that this // "specialPutArg" is using. relatedInterval = currentInterval->isSpecialPutArg ? nullptr : currentInterval->relatedInterval; relatedPreferences = (relatedInterval == nullptr) ? RBM_NONE : relatedInterval->getCurrentPreferences(); rangeEndLocation = refPosition->getRangeEndLocation(); relatedLastLocation = rangeEndLocation; preferCalleeSave = currentInterval->preferCalleeSave; rangeEndRefPosition = nullptr; lastRefPosition = currentInterval->lastRefPosition; lastLocation = MinLocation; prevRegRec = currentInterval->assignedReg; // These are used in the post-selection updates, and must be set for any selection. freeCandidates = RBM_NONE; matchingConstants = RBM_NONE; unassignedSet = RBM_NONE; coversSet = RBM_NONE; preferenceSet = RBM_NONE; coversRelatedSet = RBM_NONE; coversFullSet = RBM_NONE; foundRegBit = REG_NA; found = false; skipAllocation = false; coversSetsCalculated = false; } // ---------------------------------------------------------- // applySelection: Apply the heuristic to the candidates. // // Arguments: // selectionScore: The score corresponding to the heuristics we apply. // selectionCandidates: The possible candidates for the heuristic to apply. // // Return Values: // 'true' if there was a single register candidate available after the heuristic is applied. // bool LinearScan::RegisterSelection::applySelection(int selectionScore, regMaskTP selectionCandidates) { regMaskTP newCandidates = candidates & selectionCandidates; if (newCandidates != RBM_NONE) { score += selectionScore; candidates = newCandidates; return LinearScan::isSingleRegister(candidates); } return false; } // ---------------------------------------------------------- // applySingleRegSelection: Select a single register, if it is in the candidate set. // // Arguments: // selectionScore: The score corresponding to the heuristics we apply. // selectionCandidates: The possible candidates for the heuristic to apply. // // Return Values: // 'true' if there was a single register candidate available after the heuristic is applied. // bool LinearScan::RegisterSelection::applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate) { assert(LinearScan::isSingleRegister(selectionCandidate)); regMaskTP newCandidates = candidates & selectionCandidate; if (newCandidates != RBM_NONE) { candidates = newCandidates; return true; } return false; } // ---------------------------------------------------------- // try_FREE: Apply the FREE heuristic. // void LinearScan::RegisterSelection::try_FREE() { assert(!found); if (freeCandidates == RBM_NONE) { return; } found = applySelection(FREE, freeCandidates); } // ---------------------------------------------------------- // try_CONST_AVAILABLE: Apply the CONST_AVAILABLE (matching constant) heuristic. // // Note: we always need to define the 'matchingConstants' set. 
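// (Illustrative: if this def is of a constant-valued interval and a free register still
// holds that same constant, preferring it allows the def to reuse the value in place;
// see the LSRA_EVENT_REUSE_REG dump event.)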
// void LinearScan::RegisterSelection::try_CONST_AVAILABLE() { assert(!found); if (freeCandidates == RBM_NONE) { return; } if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType)) { found = applySelection(CONST_AVAILABLE, matchingConstants); } } // ---------------------------------------------------------- // try_THIS_ASSIGNED: Apply the THIS_ASSIGNED heuristic. // void LinearScan::RegisterSelection::try_THIS_ASSIGNED() { assert(!found); if (freeCandidates == RBM_NONE) { return; } if (prevRegRec != nullptr) { found = applySelection(THIS_ASSIGNED, freeCandidates & preferences & prevRegBit); } } // ---------------------------------------------------------- // try_COVERS: Apply the COVERS heuristic. // void LinearScan::RegisterSelection::try_COVERS() { assert(!found); calculateCoversSets(); found = applySelection(COVERS, coversSet & preferenceSet); } // ---------------------------------------------------------- // try_OWN_PREFERENCE: Apply the OWN_PREFERENCE heuristic. // // Note: 'preferenceSet' already includes only freeCandidates. // void LinearScan::RegisterSelection::try_OWN_PREFERENCE() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(OWN_PREFERENCE, (preferenceSet & freeCandidates)); } // ---------------------------------------------------------- // try_COVERS_RELATED: Apply the COVERS_RELATED heuristic. // void LinearScan::RegisterSelection::try_COVERS_RELATED() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(COVERS_RELATED, (coversRelatedSet & freeCandidates)); } // ---------------------------------------------------------- // try_RELATED_PREFERENCE: Apply the RELATED_PREFERENCE heuristic. // void LinearScan::RegisterSelection::try_RELATED_PREFERENCE() { assert(!found); found = applySelection(RELATED_PREFERENCE, relatedPreferences & freeCandidates); } // ---------------------------------------------------------- // try_CALLER_CALLEE: Apply the CALLER_CALLEE heuristic. // void LinearScan::RegisterSelection::try_CALLER_CALLEE() { assert(!found); found = applySelection(CALLER_CALLEE, callerCalleePrefs & freeCandidates); } // ---------------------------------------------------------- // try_UNASSIGNED: Apply the UNASSIGNED heuristic. // void LinearScan::RegisterSelection::try_UNASSIGNED() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(UNASSIGNED, unassignedSet); } // ---------------------------------------------------------- // try_COVERS_FULL: Apply the COVERS_FULL heuristic. // void LinearScan::RegisterSelection::try_COVERS_FULL() { assert(!found); #ifdef DEBUG calculateCoversSets(); #endif found = applySelection(COVERS_FULL, (coversFullSet & freeCandidates)); } // ---------------------------------------------------------- // try_BEST_FIT: Apply the BEST_FIT heuristic. // void LinearScan::RegisterSelection::try_BEST_FIT() { assert(!found); if (freeCandidates == RBM_NONE) { return; } regMaskTP bestFitSet = RBM_NONE; // If the best score includes COVERS_FULL, pick the one that's killed soonest. // If none cover the full range, the BEST_FIT is the one that's killed later. bool earliestIsBest = ((score & COVERS_FULL) != 0); LsraLocation bestFitLocation = earliestIsBest ? 
MaxLocation : MinLocation; for (regMaskTP bestFitCandidates = candidates; bestFitCandidates != RBM_NONE;) { regMaskTP bestFitCandidateBit = genFindLowestBit(bestFitCandidates); bestFitCandidates &= ~bestFitCandidateBit; regNumber bestFitCandidateRegNum = genRegNumFromMask(bestFitCandidateBit); // Find the next RefPosition of the register. LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(bestFitCandidateRegNum, regType); LsraLocation nextPhysRefLocation = linearScan->getNextFixedRef(bestFitCandidateRegNum, regType); nextPhysRefLocation = Min(nextPhysRefLocation, nextIntervalLocation); // If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that // we don't think it isn't covering the live range. // This doesn't handle the case where earlier RefPositions for this Interval are also // FixedRefs of this regNum, but at least those are only interesting in the case where those // are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg. // TODO: This duplicates code in an earlier loop, and is basically here to duplicate previous // behavior; see if we can avoid this. if (nextPhysRefLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(bestFitCandidateRegNum)) { INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval)); nextPhysRefLocation++; } if (nextPhysRefLocation == bestFitLocation) { bestFitSet |= bestFitCandidateBit; } else { bool isBetter = false; if (nextPhysRefLocation > lastLocation) { // This covers the full range; favor it if the other doesn't, or if it's a closer match. if ((bestFitLocation <= lastLocation) || (nextPhysRefLocation < bestFitLocation)) { isBetter = true; } } else { // This doesn't cover the full range; favor it if the other doesn't either, but this ends later. if ((bestFitLocation <= lastLocation) && (nextPhysRefLocation > bestFitLocation)) { isBetter = true; } } if (isBetter) { bestFitSet = bestFitCandidateBit; bestFitLocation = nextPhysRefLocation; } } } assert(bestFitSet != RBM_NONE); found = applySelection(BEST_FIT, bestFitSet); } // ---------------------------------------------------------- // try_IS_PREV_REG: Apply the IS_PREV_REG heuristic. // // Note: Oddly, the previous heuristics only considered this if it covered the range. // TODO: Check if Only applies if we have freeCandidates. // void LinearScan::RegisterSelection::try_IS_PREV_REG() { // TODO: We do not check found here. if ((prevRegRec != nullptr) && ((score & COVERS_FULL) != 0)) { found = applySingleRegSelection(IS_PREV_REG, prevRegBit); } } // ---------------------------------------------------------- // try_REG_ORDER: Apply the REG_ORDER heuristic. Only applies if we have freeCandidates. // void LinearScan::RegisterSelection::try_REG_ORDER() { assert(!found); if (freeCandidates == RBM_NONE) { return; } // This will always result in a single candidate. That is, it is the tie-breaker // for free candidates, and doesn't make sense as anything other than the last // heuristic for free registers. 
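    // (Illustrative example with hypothetical values: given candidates {r1, r2} where
    // regOrder is r1=3 and r2=1, the loop below picks r2's bit as the unique winner.)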
unsigned lowestRegOrder = UINT_MAX; regMaskTP lowestRegOrderBit = RBM_NONE; for (regMaskTP regOrderCandidates = candidates; regOrderCandidates != RBM_NONE;) { regMaskTP regOrderCandidateBit = genFindLowestBit(regOrderCandidates); regOrderCandidates &= ~regOrderCandidateBit; regNumber regOrderCandidateRegNum = genRegNumFromMask(regOrderCandidateBit); unsigned thisRegOrder = linearScan->getRegisterRecord(regOrderCandidateRegNum)->regOrder; if (thisRegOrder < lowestRegOrder) { lowestRegOrder = thisRegOrder; lowestRegOrderBit = regOrderCandidateBit; } } assert(lowestRegOrderBit != RBM_NONE); found = applySingleRegSelection(REG_ORDER, lowestRegOrderBit); } // ---------------------------------------------------------- // try_SPILL_COST: Apply the SPILL_COST heuristic. // void LinearScan::RegisterSelection::try_SPILL_COST() { assert(!found); // The set of registers with the lowest spill weight. regMaskTP lowestCostSpillSet = RBM_NONE; // Apply the SPILL_COST heuristic and eliminate regs that can't be spilled. // The spill weight for 'refPosition' (the one we're allocating now). weight_t thisSpillWeight = linearScan->getWeight(refPosition); // The spill weight for the best candidate we've found so far. weight_t bestSpillWeight = FloatingPointUtils::infinite_double(); // True if we found registers with lower spill weight than this refPosition. bool foundLowerSpillWeight = false; for (regMaskTP spillCandidates = candidates; spillCandidates != RBM_NONE;) { regMaskTP spillCandidateBit = genFindLowestBit(spillCandidates); spillCandidates &= ~spillCandidateBit; regNumber spillCandidateRegNum = genRegNumFromMask(spillCandidateBit); RegRecord* spillCandidateRegRecord = &linearScan->physRegs[spillCandidateRegNum]; Interval* assignedInterval = spillCandidateRegRecord->assignedInterval; // Can and should the interval in this register be spilled for this one, // if we don't find a better alternative? if ((linearScan->getNextIntervalRef(spillCandidateRegNum, regType) == currentLocation) && !assignedInterval->getNextRefPosition()->RegOptional()) { continue; } if (!linearScan->isSpillCandidate(currentInterval, refPosition, spillCandidateRegRecord)) { continue; } weight_t currentSpillWeight = 0; RefPosition* recentRefPosition = assignedInterval != nullptr ? assignedInterval->recentRefPosition : nullptr; if ((recentRefPosition != nullptr) && (recentRefPosition->RegOptional() && !(assignedInterval->isLocalVar && recentRefPosition->IsActualRef()))) { // We do not "spillAfter" if previous (recent) refPosition was regOptional or if it // is not an actual ref. In those cases, we will reload in future (next) refPosition. // For such cases, consider the spill cost of next refposition. // See notes in "spillInterval()". RefPosition* reloadRefPosition = assignedInterval->getNextRefPosition(); if (reloadRefPosition != nullptr) { currentSpillWeight = linearScan->getWeight(reloadRefPosition); } } // Only consider spillCost if we were not able to calculate weight of reloadRefPosition. 
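        // (If no reload weight was computed above, fall back to the per-register spillCost
        // table; on ARM a TYP_DOUBLE interval also considers the cost of the other half of
        // the float register pair.)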
if (currentSpillWeight == 0)
        {
            currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
#ifdef TARGET_ARM
            if (currentInterval->registerType == TYP_DOUBLE)
            {
                currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
            }
#endif
        }

        if (currentSpillWeight < bestSpillWeight)
        {
            bestSpillWeight    = currentSpillWeight;
            lowestCostSpillSet = spillCandidateBit;
        }
        else if (currentSpillWeight == bestSpillWeight)
        {
            lowestCostSpillSet |= spillCandidateBit;
        }
    }

    if (lowestCostSpillSet == RBM_NONE)
    {
        return;
    }

    // We won't spill if this refPosition is RegOptional() and we have no candidates
    // with a lower spill cost.
    if ((bestSpillWeight >= thisSpillWeight) && refPosition->RegOptional())
    {
        currentInterval->assignedReg = nullptr;
        skipAllocation               = true;
        found                        = true;
    }

    // We must have at least one with the lowest spill cost.
    assert(lowestCostSpillSet != RBM_NONE);
    found = applySelection(SPILL_COST, lowestCostSpillSet);
}

// ----------------------------------------------------------
// try_FAR_NEXT_REF: Apply the FAR_NEXT_REF heuristic.
//
void LinearScan::RegisterSelection::try_FAR_NEXT_REF()
{
    assert(!found);

    LsraLocation farthestLocation = MinLocation;
    regMaskTP    farthestSet      = RBM_NONE;
    for (regMaskTP farthestCandidates = candidates; farthestCandidates != RBM_NONE;)
    {
        regMaskTP farthestCandidateBit = genFindLowestBit(farthestCandidates);
        farthestCandidates &= ~farthestCandidateBit;
        regNumber farthestCandidateRegNum = genRegNumFromMask(farthestCandidateBit);

        // Find the next RefPosition of the register.
        LsraLocation nextIntervalLocation =
            linearScan->getNextIntervalRef(farthestCandidateRegNum, currentInterval->registerType);
        LsraLocation nextPhysRefLocation =
            Min(linearScan->nextFixedRef[farthestCandidateRegNum], nextIntervalLocation);
        if (nextPhysRefLocation == farthestLocation)
        {
            farthestSet |= farthestCandidateBit;
        }
        else if (nextPhysRefLocation > farthestLocation)
        {
            farthestSet      = farthestCandidateBit;
            farthestLocation = nextPhysRefLocation;
        }
    }
    // We must have at least one with the farthest next reference.
    assert(farthestSet != RBM_NONE);
    found = applySelection(FAR_NEXT_REF, farthestSet);
}

// ----------------------------------------------------------
// try_PREV_REG_OPT: Apply the PREV_REG_OPT heuristic.
//
void LinearScan::RegisterSelection::try_PREV_REG_OPT()
{
    assert(!found);

    regMaskTP prevRegOptSet = RBM_NONE;
    for (regMaskTP prevRegOptCandidates = candidates; prevRegOptCandidates != RBM_NONE;)
    {
        regMaskTP prevRegOptCandidateBit = genFindLowestBit(prevRegOptCandidates);
        prevRegOptCandidates &= ~prevRegOptCandidateBit;
        regNumber prevRegOptCandidateRegNum = genRegNumFromMask(prevRegOptCandidateBit);
        Interval* assignedInterval          = linearScan->physRegs[prevRegOptCandidateRegNum].assignedInterval;
        bool      foundPrevRegOptReg        = true;
#ifdef DEBUG
        bool hasAssignedInterval = false;
#endif

        if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
        {
            foundPrevRegOptReg &=
                (assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional());
#ifdef DEBUG
            hasAssignedInterval = true;
#endif
        }
#ifndef TARGET_ARM
        else
        {
            foundPrevRegOptReg = false;
        }
#endif

#ifdef TARGET_ARM
        // If current interval is TYP_DOUBLE, verify if the other half register matches the heuristics.
        // We have three cases:
        // 1. One register of the pair has an assigned interval: check whether that register's refPosition
        //    matches the heuristic. If yes, add it to the set.
        // 2. Both registers of the pair have an assigned interval: conservatively "and" the conditions for
        //    the heuristics of their corresponding refPositions. If both registers' heuristics match, add
        //    them to the set. TODO-CQ-ARM: We may implement a better condition later.
        // 3. Neither register has an assigned interval: skip adding the register and assert.
        if (currentInterval->registerType == TYP_DOUBLE)
        {
            regNumber anotherHalfRegNum = linearScan->findAnotherHalfRegNum(prevRegOptCandidateRegNum);
            assignedInterval            = linearScan->physRegs[anotherHalfRegNum].assignedInterval;
            if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
            {
                if (assignedInterval->recentRefPosition->reload &&
                    assignedInterval->recentRefPosition->RegOptional())
                {
                    foundPrevRegOptReg &= (assignedInterval->recentRefPosition->reload &&
                                           assignedInterval->recentRefPosition->RegOptional());
                }
#ifdef DEBUG
                hasAssignedInterval = true;
#endif
            }
        }
#endif

        if (foundPrevRegOptReg)
        {
            // TODO-Cleanup: Previously, we always used the highest regNum with a previous regOptional
            // RefPosition, which is not really consistent with the way other selection criteria are
            // applied. Should probably be: prevRegOptSet |= prevRegOptCandidateBit;
            prevRegOptSet = prevRegOptCandidateBit;
        }

#ifdef DEBUG
        // The assignedInterval should be non-null, and should have a recentRefPosition, however since
        // this is a heuristic, we don't want a fatal error, so we just assert (not noway_assert).
        if (!hasAssignedInterval)
        {
            assert(!"Spill candidate has no assignedInterval recentRefPosition");
        }
#endif
    }
    found = applySelection(PREV_REG_OPT, prevRegOptSet);
}

// ----------------------------------------------------------
// try_REG_NUM: Apply the REG_NUM heuristic.
//
void LinearScan::RegisterSelection::try_REG_NUM()
{
    assert(!found);

    found = applySingleRegSelection(REG_NUM, genFindLowestBit(candidates));
}

// ----------------------------------------------------------
// calculateCoversSets: Calculate the necessary covers set registers to be used
// for heuristics like COVERS, COVERS_RELATED, COVERS_FULL.
//
void LinearScan::RegisterSelection::calculateCoversSets()
{
    if (freeCandidates == RBM_NONE || coversSetsCalculated)
    {
        return;
    }
    preferenceSet              = (candidates & preferences);
    regMaskTP coversCandidates = (preferenceSet == RBM_NONE) ? candidates : preferenceSet;
    for (; coversCandidates != RBM_NONE;)
    {
        regMaskTP coversCandidateBit = genFindLowestBit(coversCandidates);
        coversCandidates &= ~coversCandidateBit;
        regNumber coversCandidateRegNum = genRegNumFromMask(coversCandidateBit);

        // If we have a single candidate we don't need to compute the preference-related sets, but we
        // do need to compute the unassignedSet.
        if (!found)
        {
            // Find the next RefPosition of the register.
            LsraLocation nextIntervalLocation    = linearScan->getNextIntervalRef(coversCandidateRegNum, regType);
            LsraLocation nextPhysRefLocation     = linearScan->getNextFixedRef(coversCandidateRegNum, regType);
            LsraLocation coversCandidateLocation = Min(nextPhysRefLocation, nextIntervalLocation);

            // If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
            // we don't think it isn't covering the live range.
            // This doesn't handle the case where earlier RefPositions for this Interval are also
            // FixedRefs of this regNum, but at least those are only interesting in the case where those
            // are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
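            // (Illustrative: if the live range ends at location 40 with a fixed use of this
            // register at 40, the raw next-fixed-ref location of 40 would otherwise make the
            // register appear not to cover the range.)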
if (coversCandidateLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(coversCandidateRegNum)) { INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval)); coversCandidateLocation++; } if (coversCandidateLocation > rangeEndLocation) { coversSet |= coversCandidateBit; } if ((coversCandidateBit & relatedPreferences) != RBM_NONE) { if (coversCandidateLocation > relatedLastLocation) { coversRelatedSet |= coversCandidateBit; } } else if (coversCandidateBit == refPosition->registerAssignment) { // If we had a fixed-reg def of a reg that will be killed before the use, prefer it to any other // registers with the same score. (Note that we haven't changed the original registerAssignment // on the RefPosition). // Overload the RELATED_PREFERENCE value. // TODO-CQ: Consider if this should be split out. coversRelatedSet |= coversCandidateBit; } // Does this cover the full range of the interval? if (coversCandidateLocation > lastLocation) { coversFullSet |= coversCandidateBit; } } // The register is considered unassigned if it has no assignedInterval, OR // if its next reference is beyond the range of this interval. if (linearScan->nextIntervalRef[coversCandidateRegNum] > lastLocation) { unassignedSet |= coversCandidateBit; } } coversSetsCalculated = true; } // ---------------------------------------------------------- // select: For given `currentInterval` and `refPosition`, selects a register to be assigned. // // Arguments: // currentInterval - Current interval for which register needs to be selected. // refPosition - Refposition within the interval for which register needs to be selected. // // Return Values: // Register bit selected (a single register) and REG_NA if no register was selected. // regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { #ifdef DEBUG *registerScore = NONE; #endif reset(currentInterval, refPosition); // process data-structures if (RefTypeIsDef(refPosition->refType)) { if (currentInterval->hasConflictingDefUse) { linearScan->resolveConflictingDefAndUse(currentInterval, refPosition); candidates = refPosition->registerAssignment; } // Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the // use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark // the contained nodes as interfering). // Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which // has had its registerAssignment changed to no longer be a single register. else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) && !nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment)) { regNumber defReg = refPosition->assignedReg(); RegRecord* defRegRecord = linearScan->getRegisterRecord(defReg); RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition; assert(currFixedRegRefPosition != nullptr && currFixedRegRefPosition->nodeLocation == refPosition->nodeLocation); // If there is another fixed reference to this register before the use, change the candidates // on this RefPosition to include that of nextRefPos. 
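        // (Illustrative: a def fixed to some register R whose use is preceded by another
        // fixed reference to R no later than the use's end location; widening the candidates
        // toward the use's set lets the def land elsewhere and avoid a forced copy.)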
// ----------------------------------------------------------
//  select: For given `currentInterval` and `refPosition`, selects a register to be assigned.
//
// Arguments:
//   currentInterval - Current interval for which register needs to be selected.
//   refPosition     - Refposition within the interval for which register needs to be selected.
//
// Return Values:
//    Register bit selected (a single register) and REG_NA if no register was selected.
//
regMaskTP LinearScan::RegisterSelection::select(Interval*    currentInterval,
                                                RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
#ifdef DEBUG
    *registerScore = NONE;
#endif

    reset(currentInterval, refPosition);

    // process data-structures
    if (RefTypeIsDef(refPosition->refType))
    {
        if (currentInterval->hasConflictingDefUse)
        {
            linearScan->resolveConflictingDefAndUse(currentInterval, refPosition);
            candidates = refPosition->registerAssignment;
        }
        // Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the
        // use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark
        // the contained nodes as interfering).
        // Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which
        // has had its registerAssignment changed to no longer be a single register.
        else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) &&
                 !nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment))
        {
            regNumber    defReg       = refPosition->assignedReg();
            RegRecord*   defRegRecord = linearScan->getRegisterRecord(defReg);

            RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
            assert(currFixedRegRefPosition != nullptr &&
                   currFixedRegRefPosition->nodeLocation == refPosition->nodeLocation);

            // If there is another fixed reference to this register before the use, change the candidates
            // on this RefPosition to include that of nextRefPos.
            RefPosition* nextFixedRegRefPosition = defRegRecord->getNextRefPosition();
            if (nextFixedRegRefPosition != nullptr &&
                nextFixedRegRefPosition->nodeLocation <= nextRefPos->getRefEndLocation())
            {
                candidates |= nextRefPos->registerAssignment;
                if (preferences == refPosition->registerAssignment)
                {
                    preferences = candidates;
                }
            }
        }
    }

    preferences &= candidates;
    if (preferences == RBM_NONE)
    {
        preferences = candidates;
    }

#ifdef DEBUG
    candidates = linearScan->stressLimitRegs(refPosition, candidates);
#endif
    assert(candidates != RBM_NONE);

    Interval* nextRelatedInterval  = relatedInterval;
    Interval* finalRelatedInterval = relatedInterval;
    Interval* rangeEndInterval     = relatedInterval;

    bool avoidByteRegs = false;
#ifdef TARGET_X86
    if ((relatedPreferences & ~RBM_BYTE_REGS) != RBM_NONE)
    {
        avoidByteRegs = true;
    }
#endif

    // Follow the chain of related intervals, as long as:
    // - The next reference is a def. We don't want to use the relatedInterval for preferencing if its next reference
    //   is not a new definition (as it either is or will become live).
    // - The next (def) reference is downstream. Otherwise we could iterate indefinitely because the preferences can be
    //   circular.
    // - The intersection of preferenced registers is non-empty.
    //
    while (nextRelatedInterval != nullptr)
    {
        RefPosition* nextRelatedRefPosition = nextRelatedInterval->getNextRefPosition();

        // Only use the relatedInterval for preferencing if the related interval's next reference
        // is a new definition.
        if ((nextRelatedRefPosition != nullptr) && RefTypeIsDef(nextRelatedRefPosition->refType))
        {
            finalRelatedInterval = nextRelatedInterval;
            nextRelatedInterval  = nullptr;

            // First, get the preferences for this interval
            regMaskTP thisRelatedPreferences = finalRelatedInterval->getCurrentPreferences();

            // Now, determine if they are compatible and update the relatedPreferences that we'll consider.
            regMaskTP newRelatedPreferences = thisRelatedPreferences & relatedPreferences;
            if (newRelatedPreferences != RBM_NONE && (!avoidByteRegs || thisRelatedPreferences != RBM_BYTE_REGS))
            {
                // TODO-CQ: The following isFree() check doesn't account for the possibility that there's an
                // assignedInterval whose recentRefPosition was delayFree. It also fails to account for
                // the TYP_DOUBLE case on ARM. It would be better to replace the call to isFree with
                // isRegAvailable(genRegNumFromMask(newRelatedPreferences), regType)), but this is retained
                // to achieve zero diffs.
                //
                bool thisIsSingleReg = isSingleRegister(newRelatedPreferences);
                if (!thisIsSingleReg ||
                    (finalRelatedInterval->isLocalVar &&
                     linearScan->isFree(linearScan->getRegisterRecord(genRegNumFromMask(newRelatedPreferences)))))
                {
                    relatedPreferences = newRelatedPreferences;
                    // If this Interval has a downstream def without a single-register preference, continue to iterate.
                    if (nextRelatedRefPosition->nodeLocation > rangeEndLocation)
                    {
                        preferCalleeSave    = (preferCalleeSave || finalRelatedInterval->preferCalleeSave);
                        rangeEndLocation    = nextRelatedRefPosition->getRangeEndLocation();
                        rangeEndInterval    = finalRelatedInterval;
                        nextRelatedInterval = finalRelatedInterval->relatedInterval;
                    }
                }
            }
        }
        else
        {
            if (nextRelatedInterval == relatedInterval)
            {
                relatedInterval    = nullptr;
                relatedPreferences = RBM_NONE;
            }
            nextRelatedInterval = nullptr;
        }
    }

    // For floating point, we want to be less aggressive about using callee-save registers.
    // So in that case, we just need to ensure that the current RefPosition is covered.
    if (useFloatReg(currentInterval->registerType))
    {
        rangeEndRefPosition = refPosition;
        preferCalleeSave    = currentInterval->preferCalleeSave;
    }
    else if (currentInterval->isWriteThru && refPosition->spillAfter)
    {
        // This is treated as a last use of the register, as there is an upcoming EH boundary.
        rangeEndRefPosition = refPosition;
    }
    else
    {
        rangeEndRefPosition = refPosition->getRangeEndRef();
        // If we have a chain of related intervals, and a finalRelatedInterval that
        // is not currently occupying a register, and whose lifetime begins after this one,
        // we want to try to select a register that will cover its lifetime.
        if ((rangeEndInterval != nullptr) && (rangeEndInterval->assignedReg == nullptr) &&
            !rangeEndInterval->isWriteThru &&
            (rangeEndInterval->getNextRefLocation() >= rangeEndRefPosition->nodeLocation))
        {
            lastRefPosition = rangeEndInterval->lastRefPosition;
        }
    }
    if ((relatedInterval != nullptr) && !relatedInterval->isWriteThru)
    {
        relatedLastLocation = relatedInterval->lastRefPosition->nodeLocation;
    }

    if (preferCalleeSave)
    {
        regMaskTP calleeSaveCandidates = calleeSaveRegs(currentInterval->registerType);
        if (currentInterval->isWriteThru)
        {
            // We'll only prefer a callee-save register if it's already been used.
            regMaskTP unusedCalleeSaves =
                calleeSaveCandidates & ~(linearScan->compiler->codeGen->regSet.rsGetModifiedRegsMask());
            callerCalleePrefs = calleeSaveCandidates & ~unusedCalleeSaves;
            preferences &= ~unusedCalleeSaves;
        }
        else
        {
            callerCalleePrefs = calleeSaveCandidates;
        }
    }
    else
    {
        callerCalleePrefs = callerSaveRegs(currentInterval->registerType);
    }

    // If this has a delayed use (due to being used in a rmw position of a
    // non-commutative operator), its endLocation is delayed until the "def"
    // position, which is one location past the use (getRefEndLocation() takes care of this).
    rangeEndLocation = rangeEndRefPosition->getRefEndLocation();
    lastLocation     = lastRefPosition->getRefEndLocation();

    // We'll set this to short-circuit remaining heuristics when we have a single candidate.
    found = false;

    // Is this a fixedReg?
    regMaskTP fixedRegMask = RBM_NONE;
    if (refPosition->isFixedRegRef)
    {
        assert(genMaxOneBit(refPosition->registerAssignment));
        fixedRegMask = refPosition->registerAssignment;
        if (candidates == refPosition->registerAssignment)
        {
            found = true;
            if (linearScan->nextIntervalRef[genRegNumFromMask(candidates)] > lastLocation)
            {
                unassignedSet = candidates;
            }
        }
    }

    // Eliminate candidates that are in-use or busy.
    if (!found)
    {
        regMaskTP busyRegs = linearScan->regsBusyUntilKill | linearScan->regsInUseThisLocation;
        candidates &= ~busyRegs;

        // Also eliminate as busy any register with a conflicting fixed reference at this or
        // the next location.
        // Note that this will eliminate the fixedReg, if any, but we'll add it back below.
        regMaskTP checkConflictMask = candidates & linearScan->fixedRegs;
        while (checkConflictMask != RBM_NONE)
        {
            regMaskTP checkConflictBit = genFindLowestBit(checkConflictMask);
            checkConflictMask &= ~checkConflictBit;
            regNumber    checkConflictReg      = genRegNumFromMask(checkConflictBit);
            LsraLocation checkConflictLocation = linearScan->nextFixedRef[checkConflictReg];

            if ((checkConflictLocation == currentLocation) ||
                (refPosition->delayRegFree && (checkConflictLocation == (currentLocation + 1))))
            {
                candidates &= ~checkConflictBit;
            }
        }
        candidates |= fixedRegMask;
        found = isSingleRegister(candidates);
    }

    // By chance, is prevRegRec already holding this interval, as a copyReg or having
    // been restored as inactive after a kill?
    // NOTE: this is not currently considered one of the selection criteria - it always wins
    // if it is the assignedInterval of 'prevRegRec'.
    if (!found && (prevRegRec != nullptr))
    {
        prevRegBit = genRegMask(prevRegRec->regNum);
        if ((prevRegRec->assignedInterval == currentInterval) && ((candidates & prevRegBit) != RBM_NONE))
        {
            candidates = prevRegBit;
            found      = true;
#ifdef DEBUG
            *registerScore = THIS_ASSIGNED;
#endif
        }
    }
    else
    {
        prevRegBit = RBM_NONE;
    }

    if (!found && (candidates == RBM_NONE))
    {
        assert(refPosition->RegOptional());
        currentInterval->assignedReg = nullptr;
        return RBM_NONE;
    }

    // TODO-Cleanup: Previously, the "reverseSelect" stress mode reversed the order of the heuristics.
    // It needs to be re-engineered with this refactoring.
    // In non-debug builds, this will simply get optimized away
    bool reverseSelect = false;
#ifdef DEBUG
    reverseSelect = linearScan->doReverseSelect();
#endif // DEBUG

    freeCandidates = linearScan->getFreeCandidates(candidates, regType);

    // If no free candidates, then double check if refPosition is an actual ref.
    if (freeCandidates == RBM_NONE)
    {
        // We won't spill if this refPosition is not an actual ref.
        if (!refPosition->IsActualRef())
        {
            currentInterval->assignedReg = nullptr;
            return RBM_NONE;
        }
    }
    else
    {
        // Set the 'matchingConstants' set.
        if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType))
        {
            matchingConstants = linearScan->getMatchingConstants(candidates, currentInterval, refPosition);
        }
    }

#define IF_FOUND_GOTO_DONE \
    if (found)             \
        goto Selection_Done;

#ifdef DEBUG
    HeuristicFn fn;
    for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++)
    {
        IF_FOUND_GOTO_DONE

        RegisterScore heuristicToApply = RegSelectionOrder[orderId];
        if (mappingTable->Lookup(heuristicToApply, &fn))
        {
            (this->*fn)();
            if (found)
            {
                *registerScore = heuristicToApply;
            }
#if TRACK_LSRA_STATS
            INTRACK_STATS_IF(found, linearScan->updateLsraStat(linearScan->getLsraStatFromScore(heuristicToApply),
                                                               refPosition->bbNum));
#endif // TRACK_LSRA_STATS
        }
        else
        {
            assert(!"Unexpected heuristic value!");
        }
    }
#else // RELEASE
    // In release, just invoke the default order
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
    try_##stat();                                       \
    IF_FOUND_GOTO_DONE
#include "lsra_score.h"
#undef REG_SEL_DEF
#endif // DEBUG
#undef IF_FOUND_GOTO_DONE

Selection_Done:
    if (skipAllocation)
    {
        return RBM_NONE;
    }
    calculateCoversSets();

    assert(found && isSingleRegister(candidates));
    foundRegBit = candidates;
    return candidates;
}
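The release-build dispatch above is an X-macro: lsra_score.h lists every heuristic exactly once via REG_SEL_DEF, and each include site re-defines that macro to stamp out enum values, stat names, or the try_* call chain. A minimal self-contained sketch of the same pattern, using hypothetical heuristic names and values rather than the JIT's actual score table:

#include <cstdio>

// Single source of truth: one X(...) entry per heuristic, in priority order.
#define SCORE_LIST(X) X(FREE, 1) X(CONST_AVAILABLE, 2) X(THIS_ASSIGNED, 4)

// Expansion 1: an enum with one value per heuristic.
enum Score
{
#define X(name, value) name = value,
    SCORE_LIST(X)
#undef X
};

// Expansion 2: one try_* function per heuristic (stubbed out here).
#define X(name, value)                                                        \
    bool try_##name()                                                         \
    {                                                                         \
        std::printf("trying %s (value %d)\n", #name, value);                  \
        return false; /* no single candidate yet; fall through to the next */ \
    }
SCORE_LIST(X)
#undef X

int main()
{
    // Expansion 3: apply every heuristic in declaration order, stopping at the
    // first that succeeds -- the same shape as the release-build dispatch above.
#define X(name, value) \
    if (try_##name())  \
        return 0;
    SCORE_LIST(X)
#undef X
    return 0;
}

Because all three expansions are driven by the same list, adding a heuristic to the list automatically keeps the enum, the stats, and the call order in sync.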
1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
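In effect, the change guards upper-vector save/restore with a condition of roughly the following shape. This is a hedged sketch, not the literal diff: skipUpperVectorSaveRestore is a hypothetical helper, while bbJumpKind, BBJ_THROW, and GenTreeCall::IsNoReturn follow the JIT APIs of this era.

// Hypothetical helper: returns true when saving the upper vector halves around
// a call is pointless because control never resumes past this point.
bool skipUpperVectorSaveRestore(BasicBlock* block, GenTreeCall* call)
{
    // Handled since dotnet/runtime#62662: the call itself never returns.
    if (call->IsNoReturn())
    {
        return true;
    }

    // This PR's extension: the block ends in a throw, so any upper vector halves
    // that are live across the call can never be consumed afterwards.
    if (block->bbJumpKind == BBJ_THROW)
    {
        return true;
    }

    return false;
}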
./src/coreclr/jit/lsra.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*****************************************************************************/

#ifndef _LSRA_H_
#define _LSRA_H_

#include "arraylist.h"
#include "smallhash.h"

// Minor and forward-reference types
class Interval;
class RefPosition;
class LinearScan;
class RegRecord;

template <class T>
class ArrayStack;

// LsraLocation tracks the linearized order of the nodes.
// Each node is assigned two LsraLocations - one for all the uses and all but the last
// def, and a second location for the last def (if any)
typedef unsigned int LsraLocation;
const unsigned int MinLocation = 0;
const unsigned int MaxLocation = UINT_MAX;

// max number of registers an operation could require internally (in addition to uses and defs)
const unsigned int MaxInternalRegisters = 8;
const unsigned int RegisterTypeCount    = 2;

/*****************************************************************************
* Register types
*****************************************************************************/
typedef var_types RegisterType;

#define IntRegisterType TYP_INT
#define FloatRegisterType TYP_FLOAT

//------------------------------------------------------------------------
// regType: Return the RegisterType to use for a given type
//
// Arguments:
//    type - the type of interest
//
template <class T>
RegisterType regType(T type)
{
    return varTypeUsesFloatReg(TypeGet(type)) ? FloatRegisterType : IntRegisterType;
}

//------------------------------------------------------------------------
// useFloatReg: Check if the given var_type should be allocated to a FloatRegisterType
//
inline bool useFloatReg(var_types type)
{
    return (regType(type) == FloatRegisterType);
}

//------------------------------------------------------------------------
// registerTypesEquivalent: Check to see if two RegisterTypes are equivalent
//
inline bool registerTypesEquivalent(RegisterType a, RegisterType b)
{
    return varTypeIsIntegralOrI(a) == varTypeIsIntegralOrI(b);
}

//------------------------------------------------------------------------
// calleeSaveRegs: Get the set of callee-save registers of the given RegisterType
//
inline regMaskTP calleeSaveRegs(RegisterType rt)
{
    return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_SAVED : RBM_FLT_CALLEE_SAVED;
}

//------------------------------------------------------------------------
// callerSaveRegs: Get the set of caller-save registers of the given RegisterType
//
inline regMaskTP callerSaveRegs(RegisterType rt)
{
    return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_TRASH : RBM_FLT_CALLEE_TRASH;
}

//------------------------------------------------------------------------
// RefInfo: Captures the necessary information for a definition that is "in-flight"
//          during `buildIntervals` (i.e. a tree-node definition has been encountered,
//          but not its use). This includes the RefPosition and its associated
//          GenTree node.
//
struct RefInfo
{
    RefPosition* ref;
    GenTree*     treeNode;

    RefInfo(RefPosition* r, GenTree* t) : ref(r), treeNode(t)
    {
    }

    // default constructor for data structures
    RefInfo()
    {
    }
};

//------------------------------------------------------------------------
// RefInfoListNode: used to store a single `RefInfo` value for a
//                  node during `buildIntervals`.
//
// This is the node type for `RefInfoList` below.
//
class RefInfoListNode final : public RefInfo
{
    friend class RefInfoList;
    friend class RefInfoListNodePool;

    RefInfoListNode* m_next; // The next node in the list

public:
    RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t)
    {
    }

    //------------------------------------------------------------------------
    // RefInfoListNode::Next: Returns the next node in the list.
    RefInfoListNode* Next() const
    {
        return m_next;
    }
};

//------------------------------------------------------------------------
// RefInfoList: used to store a list of `RefInfo` values for a
//              node during `buildIntervals`.
//
// This list of 'RefInfoListNode's contains the source nodes consumed by
// a node, and is created by 'BuildNode'.
//
class RefInfoList final
{
    friend class RefInfoListNodePool;

    RefInfoListNode* m_head; // The head of the list
    RefInfoListNode* m_tail; // The tail of the list

public:
    RefInfoList() : m_head(nullptr), m_tail(nullptr)
    {
    }

    RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node)
    {
        assert(m_head->m_next == nullptr);
    }

    //------------------------------------------------------------------------
    // RefInfoList::IsEmpty: Returns true if the list is empty.
    //
    bool IsEmpty() const
    {
        return m_head == nullptr;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Begin: Returns the first node in the list.
    //
    RefInfoListNode* Begin() const
    {
        return m_head;
    }

    //------------------------------------------------------------------------
    // RefInfoList::End: Returns the position after the last node in the
    //                   list. The returned value is suitable for use as
    //                   a sentinel for iteration.
    //
    RefInfoListNode* End() const
    {
        return nullptr;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Last: Returns the last node in the list.
    //
    RefInfoListNode* Last() const
    {
        return m_tail;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Append: Appends a node to the list.
    //
    // Arguments:
    //    node - The node to append. Must not be part of an existing list.
    //
    void Append(RefInfoListNode* node)
    {
        assert(node->m_next == nullptr);

        if (m_tail == nullptr)
        {
            assert(m_head == nullptr);
            m_head = node;
        }
        else
        {
            m_tail->m_next = node;
        }

        m_tail = node;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Append: Appends another list to this list.
    //
    // Arguments:
    //    other - The list to append.
    //
    void Append(RefInfoList other)
    {
        if (m_tail == nullptr)
        {
            assert(m_head == nullptr);
            m_head = other.m_head;
        }
        else
        {
            m_tail->m_next = other.m_head;
        }

        m_tail = other.m_tail;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Prepend: Prepends a node to the list.
    //
    // Arguments:
    //    node - The node to prepend. Must not be part of an existing list.
    //
    void Prepend(RefInfoListNode* node)
    {
        assert(node->m_next == nullptr);

        if (m_head == nullptr)
        {
            assert(m_tail == nullptr);
            m_tail = node;
        }
        else
        {
            node->m_next = m_head;
        }

        m_head = node;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Add: Adds a node to the list.
    //
    // Arguments:
    //    node    - The node to add. Must not be part of an existing list.
    //    prepend - True if it should be prepended (otherwise is appended)
    //
    void Add(RefInfoListNode* node, bool prepend)
    {
        if (prepend)
        {
            Prepend(node);
        }
        else
        {
            Append(node);
        }
    }

    //------------------------------------------------------------------------
    // removeListNode - retrieve the RefInfo for the given node
    //
    // Notes:
    //     The BuildNode methods use this helper to retrieve the RefInfo for child nodes
    //     from the useList being constructed.
    //
    RefInfoListNode* removeListNode(RefInfoListNode* listNode, RefInfoListNode* prevListNode)
    {
        RefInfoListNode* nextNode = listNode->Next();
        if (prevListNode == nullptr)
        {
            m_head = nextNode;
        }
        else
        {
            prevListNode->m_next = nextNode;
        }
        if (nextNode == nullptr)
        {
            m_tail = prevListNode;
        }

        listNode->m_next = nullptr;
        return listNode;
    }

    // removeListNode - remove the RefInfoListNode for the given GenTree node from the defList
    RefInfoListNode* removeListNode(GenTree* node);
    // Same as above but takes a multiRegIdx to support multi-reg nodes.
    RefInfoListNode* removeListNode(GenTree* node, unsigned multiRegIdx);

    //------------------------------------------------------------------------
    // GetRefPosition - retrieve the RefPosition for the given node
    //
    // Notes:
    //     The Build methods use this helper to retrieve the RefPosition for child nodes
    //     from the useList being constructed. Note that, if the user knows the order of the operands,
    //     it is expected that they should just retrieve them directly.
    //
    RefPosition* GetRefPosition(GenTree* node)
    {
        for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
        {
            if (listNode->treeNode == node)
            {
                return listNode->ref;
            }
        }
        assert(!"GetRefPosition didn't find the node");
        unreached();
    }

    //------------------------------------------------------------------------
    // RefInfoList::GetSecond: Gets the second node in the list.
    //
    // Arguments:
    //    (DEBUG ONLY) treeNode - The GenTree* we expect to be in the second node.
    //
    RefInfoListNode* GetSecond(INDEBUG(GenTree* treeNode))
    {
        noway_assert((Begin() != nullptr) && (Begin()->Next() != nullptr));
        RefInfoListNode* second = Begin()->Next();
        assert(second->treeNode == treeNode);
        return second;
    }

#ifdef DEBUG
    // Count - return the number of nodes in the list (DEBUG only)
    int Count()
    {
        int count = 0;
        for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
        {
            count++;
        }
        return count;
    }
#endif // DEBUG
};

//------------------------------------------------------------------------
// RefInfoListNodePool: manages a pool of `RefInfoListNode`
//                      values to decrease overall memory usage
//                      during `buildIntervals`.
//
// `buildIntervals` involves creating a list of RefInfo items per
// node that either directly produces a set of registers or that is a
// contained node with register-producing sources. However, these lists
// are short-lived: they are destroyed once the use of the corresponding
// node is processed. As such, there is typically only a small number of
// `RefInfoListNode` values in use at any given time. Pooling these
// values avoids otherwise frequent allocations.
//
class RefInfoListNodePool final
{
    RefInfoListNode*      m_freeList;
    Compiler*             m_compiler;
    static const unsigned defaultPreallocation = 8;

public:
    RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation);
    RefInfoListNode* GetNode(RefPosition* r, GenTree* t);
    void ReturnNode(RefInfoListNode* listNode);
};
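// Illustrative usage sketch (not part of the original header), showing how BuildNode
// code typically cycles nodes through the pool -- hypothetical calling code, assuming
// a Compiler* 'comp', a RefPosition* 'ref', a GenTree* 'tree', and a RefInfoList
// 'useList' are in scope:
//
//     RefInfoListNodePool pool(comp);                     // preallocates defaultPreallocation nodes
//     RefInfoListNode*    node = pool.GetNode(ref, tree); // pops the free list, or allocates
//     useList.Append(node);                               // short-lived: consumed by the parent node
//     ...
//     pool.ReturnNode(useList.removeListNode(tree));      // pushes the node back on the free list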
#if TRACK_LSRA_STATS
enum LsraStat
{
#define LSRA_STAT_DEF(enum_name, enum_str) enum_name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name,
#include "lsra_score.h"
#undef REG_SEL_DEF
    COUNT
};
#endif // TRACK_LSRA_STATS

struct LsraBlockInfo
{
    // bbNum of the predecessor to use for the register location of live-in variables.
    // 0 for fgFirstBB.
    unsigned int predBBNum;
    weight_t     weight;
    bool         hasCriticalInEdge : 1;
    bool         hasCriticalOutEdge : 1;
    bool         hasEHBoundaryIn : 1;
    bool         hasEHBoundaryOut : 1;
    bool         hasEHPred : 1;

#if TRACK_LSRA_STATS
    // Per block maintained LSRA statistics.
    unsigned stats[LsraStat::COUNT];
#endif // TRACK_LSRA_STATS
};

enum RegisterScore
{
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value,
#include "lsra_score.h"
#undef REG_SEL_DEF
    NONE = 0
};

// This is sort of a bit mask
// The low order 2 bits will be 1 for defs, and 2 for uses
enum RefType : unsigned char
{
#define DEF_REFTYPE(memberName, memberValue, shortName) memberName = memberValue,
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
};

// position in a block (for resolution)
enum BlockStartOrEnd
{
    BlockPositionStart = 0,
    BlockPositionEnd   = 1,
    PositionCount      = 2
};

inline bool RefTypeIsUse(RefType refType)
{
    return ((refType & RefTypeUse) == RefTypeUse);
}

inline bool RefTypeIsDef(RefType refType)
{
    return ((refType & RefTypeDef) == RefTypeDef);
}

typedef regNumberSmall* VarToRegMap;

typedef jitstd::list<Interval>                      IntervalList;
typedef jitstd::list<RefPosition>                   RefPositionList;
typedef jitstd::list<RefPosition>::iterator         RefPositionIterator;
typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator;

class Referenceable
{
public:
    Referenceable()
    {
        firstRefPosition  = nullptr;
        recentRefPosition = nullptr;
        lastRefPosition   = nullptr;
    }

    // A linked list of RefPositions. These are only traversed in the forward
    // direction, and are not moved, so they don't need to be doubly linked
    // (see RefPosition).

    RefPosition* firstRefPosition;
    RefPosition* recentRefPosition;
    RefPosition* lastRefPosition;

    // Get the position of the next reference which is at or greater than
    // the current location (relies upon recentRefPosition being updated
    // during traversal).
    RefPosition* getNextRefPosition();
    LsraLocation getNextRefLocation();
};

class RegRecord : public Referenceable
{
public:
    RegRecord()
    {
        assignedInterval = nullptr;
        previousInterval = nullptr;
        regNum           = REG_NA;
        isCalleeSave     = false;
        registerType     = IntRegisterType;
    }

    void init(regNumber reg)
    {
#ifdef TARGET_ARM64
        // The Zero register, or the SP
        if ((reg == REG_ZR) || (reg == REG_SP))
        {
            // IsGeneralRegister returns false for REG_ZR and REG_SP
            regNum       = reg;
            registerType = IntRegisterType;
        }
        else
#endif
            if (emitter::isFloatReg(reg))
        {
            registerType = FloatRegisterType;
        }
        else
        {
            // The constructor defaults to IntRegisterType
            assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType);
        }
        regNum       = reg;
        isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0);
    }

#ifdef DEBUG
    // print out representation
    void dump();
    // concise representation for embedding
    void tinyDump();
#endif // DEBUG

    // DATA

    // interval to which this register is currently allocated.
    // If the interval is inactive (isActive == false) then it is not currently live,
    // and the register can be unassigned (i.e. setting assignedInterval to nullptr)
    // without spilling the register.
    Interval* assignedInterval;
    // Interval to which this register was previously allocated, and which was unassigned
    // because it was inactive. This register will be reassigned to this Interval when
    // assignedInterval becomes inactive.
    Interval* previousInterval;

    regNumber     regNum;
    bool          isCalleeSave;
    RegisterType  registerType;
    unsigned char regOrder;
};

inline bool leafInRange(GenTree* leaf, int lower, int upper)
{
    if (!leaf->IsIntCnsFitsInI32())
    {
        return false;
    }
    if (leaf->AsIntCon()->gtIconVal < lower)
    {
        return false;
    }
    if (leaf->AsIntCon()->gtIconVal > upper)
    {
        return false;
    }

    return true;
}

inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple)
{
    if (!leafInRange(leaf, lower, upper))
    {
        return false;
    }
    if (leaf->AsIntCon()->gtIconVal % multiple)
    {
        return false;
    }

    return true;
}

inline bool leafAddInRange(GenTree* leaf, int lower, int upper, int multiple = 1)
{
    if (leaf->OperGet() != GT_ADD)
    {
        return false;
    }
    return leafInRange(leaf->gtGetOp2(), lower, upper, multiple);
}

inline bool isCandidateVar(const LclVarDsc* varDsc)
{
    return varDsc->lvLRACandidate;
}

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           LinearScan                                      XX
XX                                                                           XX
XX  This is the container for the Linear Scan data structures and methods.  XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// OPTION 1: The algorithm as described in "Optimized Interval Splitting in a
// Linear Scan Register Allocator". It is driven by iterating over the Interval
// lists. In this case, we need multiple IntervalLists, and Intervals will be
// moved between them so they must be easily updated.
//
// OPTION 2: The algorithm is driven by iterating over the RefPositions. In this
// case, we only need a single IntervalList, and it won't be updated.
// The RefPosition must refer to its Interval, and we need to be able to traverse
// to the next RefPosition in code order.
// THIS IS THE OPTION CURRENTLY BEING PURSUED.

class LinearScan : public LinearScanInterface
{
    friend class RefPosition;
    friend class Interval;
    friend class Lowering;

public:
    // This could use further abstraction. From Compiler we need the tree,
    // the flowgraph and the allocator.
    LinearScan(Compiler* theCompiler);

    // This is the main driver
    virtual void doLinearScan();

    static bool isSingleRegister(regMaskTP regMask)
    {
        return (genExactlyOneBit(regMask));
    }

    // Initialize the block traversal for LSRA.
    // This resets the bbVisitedSet, and on the first invocation sets the blockSequence array,
    // which determines the order in which blocks will be allocated (currently called during Lowering).
    BasicBlock* startBlockSequence();
    // Move to the next block in sequence, updating the current block information.
    BasicBlock* moveToNextBlock();
    // Get the next block to be scheduled without changing the current block,
    // but updating the blockSequence during the first iteration if it is not fully computed.
    BasicBlock* getNextBlock();

    // This is called during code generation to update the location of variables
    virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb);

    // This does the dataflow analysis and builds the intervals
    void buildIntervals();

    // This is where the actual assignment is done
    void allocateRegisters();

    // This is the resolution phase, where cross-block mismatches are fixed up
    void resolveRegisters();

    void writeRegisters(RefPosition* currentRefPosition, GenTree* tree);

    // Insert a copy in the case where a tree node value must be moved to a different
    // register at the point of use, or it is reloaded to a different register
    // than the one it was spilled from
    void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition);

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    void makeUpperVectorInterval(unsigned varIndex);
    Interval* getUpperVectorInterval(unsigned varIndex);

    // Save the upper half of a vector that lives in a callee-save register at the point of a call.
    void insertUpperVectorSave(GenTree*     tree,
                               RefPosition* refPosition,
                               Interval*    upperVectorInterval,
                               BasicBlock*  block);
    // Restore the upper half of a vector that's been partially spilled prior to a use in 'tree'.
    void insertUpperVectorRestore(GenTree*     tree,
                                  RefPosition* refPosition,
                                  Interval*    upperVectorInterval,
                                  BasicBlock*  block);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

    // resolve along one block-block edge
    enum ResolveType
    {
        ResolveSplit,
        ResolveJoin,
        ResolveCritical,
        ResolveSharedCritical,
        ResolveTypeCount
    };
#ifdef DEBUG
    static const char* resolveTypeName[ResolveTypeCount];
#endif

    enum WhereToInsert
    {
        InsertAtTop,
        InsertAtBottom
    };

#ifdef TARGET_ARM
    void addResolutionForDouble(BasicBlock*     block,
                                GenTree*        insertionPoint,
                                Interval**      sourceIntervals,
                                regNumberSmall* location,
                                regNumber       toReg,
                                regNumber       fromReg,
                                ResolveType     resolveType);
#endif
    void addResolution(
        BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber outReg, regNumber inReg);

    void handleOutgoingCriticalEdges(BasicBlock* block);

    void resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet);

    void resolveEdges();

    // Keep track of how many temp locations we'll need for spill
    void initMaxSpill();
    void updateMaxSpill(RefPosition* refPosition);
    void recordMaxSpill();

    // max simultaneous spill locations used of every type
    unsigned int maxSpill[TYP_COUNT];
    unsigned int currentSpill[TYP_COUNT];
    bool         needFloatTmpForFPCall;
    bool         needDoubleTmpForFPCall;

#ifdef DEBUG
private:
    //------------------------------------------------------------------------
    // Should we stress lsra? This uses the COMPlus_JitStressRegs variable.
    //
    // The mask bits are currently divided into fields in which each non-zero value
    // is a distinct stress option (e.g. 0x3 is not a combination of 0x1 and 0x2).
    // However, subject to possible constraints (to be determined), the different
    // fields can be combined (e.g. 0x7 is a combination of 0x3 and 0x4).
    // Note that the field values are declared in a public enum, but the actual bits are
    // only accessed via accessors.

    unsigned lsraStressMask;

    // This controls the registers available for allocation
    enum LsraStressLimitRegs
    {
        LSRA_LIMIT_NONE      = 0,
        LSRA_LIMIT_CALLEE    = 0x1,
        LSRA_LIMIT_CALLER    = 0x2,
        LSRA_LIMIT_SMALL_SET = 0x3,
        LSRA_LIMIT_MASK      = 0x3
    };

    // When LSRA_LIMIT_SMALL_SET is specified, it is desirable to select a "mixed" set of caller- and callee-save
    // registers, so as to get different coverage than limiting to callee or caller.
    // At least for x86 and AMD64, and potentially other architectures that will support SIMD,
    // we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4.
    // Hence the "SmallFPSet" has 5 elements.
    CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
    // On System V the RDI and RSI are not callee saved. Use R12 and R13 as callee saved registers.
    static const regMaskTP LsraLimitSmallIntSet =
        (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13);
#else  // !UNIX_AMD64_ABI
    // On Windows Amd64 use the RDI and RSI as callee saved registers.
    static const regMaskTP LsraLimitSmallIntSet =
        (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI);
#endif // !UNIX_AMD64_ABI
    static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#elif defined(TARGET_ARM)
    // On ARM, we may need two registers to set up the target register for a virtual call, so we need
    // to have at least the maximum number of arg registers, plus 2.
    static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17);
#elif defined(TARGET_ARM64)
    static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9);
#elif defined(TARGET_X86)
    static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#else
#error Unsupported or unset target architecture
#endif // target

    LsraStressLimitRegs getStressLimitRegs()
    {
        return (LsraStressLimitRegs)(lsraStressMask & LSRA_LIMIT_MASK);
    }

    regMaskTP getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstrain, unsigned minRegCount);
    regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask);

    // This controls the heuristics used to select registers
    // These can be combined.
    enum LsraSelect
    {
        LSRA_SELECT_DEFAULT               = 0,
        LSRA_SELECT_REVERSE_HEURISTICS    = 0x04,
        LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08,
        LSRA_SELECT_NEAREST               = 0x10,
        LSRA_SELECT_MASK                  = 0x1c
    };
    LsraSelect getSelectionHeuristics()
    {
        return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK);
    }
    bool doReverseSelect()
    {
        return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0);
    }
    bool doReverseCallerCallee()
    {
        return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0);
    }
    bool doSelectNearest()
    {
        return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0);
    }

    // This controls the order in which basic blocks are visited during allocation
    enum LsraTraversalOrder
    {
        LSRA_TRAVERSE_LAYOUT     = 0x20,
        LSRA_TRAVERSE_PRED_FIRST = 0x40,
        LSRA_TRAVERSE_RANDOM     = 0x60, // NYI
        LSRA_TRAVERSE_DEFAULT    = LSRA_TRAVERSE_PRED_FIRST,
        LSRA_TRAVERSE_MASK       = 0x60
    };
    LsraTraversalOrder getLsraTraversalOrder()
    {
        if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0)
        {
            return LSRA_TRAVERSE_DEFAULT;
        }
        return (LsraTraversalOrder)(lsraStressMask & LSRA_TRAVERSE_MASK);
    }
    bool isTraversalLayoutOrder()
    {
        return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT;
    }
    bool isTraversalPredFirstOrder()
    {
        return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST;
    }

    // This controls whether lifetimes should be extended to the entire method.
    // Note that this has no effect under MinOpts
    enum LsraExtendLifetimes
    {
        LSRA_DONT_EXTEND           = 0,
        LSRA_EXTEND_LIFETIMES      = 0x80,
        LSRA_EXTEND_LIFETIMES_MASK = 0x80
    };
    LsraExtendLifetimes getLsraExtendLifeTimes()
    {
        return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK);
    }
    bool extendLifetimes()
    {
        return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES;
    }

    // This controls whether variable locations should be set to the previous block in layout order
    // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED -
    // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE).
    enum LsraBlockBoundaryLocations
    {
        LSRA_BLOCK_BOUNDARY_PRED   = 0,
        LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100,
        LSRA_BLOCK_BOUNDARY_ROTATE = 0x200,
        LSRA_BLOCK_BOUNDARY_MASK   = 0x300
    };
    LsraBlockBoundaryLocations getLsraBlockBoundaryLocations()
    {
        return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK);
    }
    regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs);

    // This controls whether we always insert a GT_RELOAD instruction after a spill
    // Note that this can be combined with LSRA_SPILL_ALWAYS (or not)
    enum LsraReload
    {
        LSRA_NO_RELOAD_IF_SAME    = 0,
        LSRA_ALWAYS_INSERT_RELOAD = 0x400,
        LSRA_RELOAD_MASK          = 0x400
    };
    LsraReload getLsraReload()
    {
        return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK);
    }
    bool alwaysInsertReload()
    {
        return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD;
    }

    // This controls whether we spill everywhere
    enum LsraSpill
    {
        LSRA_DONT_SPILL_ALWAYS = 0,
        LSRA_SPILL_ALWAYS      = 0x800,
        LSRA_SPILL_MASK        = 0x800
    };
    LsraSpill getLsraSpill()
    {
        return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK);
    }
    bool spillAlways()
    {
        return getLsraSpill() == LSRA_SPILL_ALWAYS;
    }
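    // Illustrative sketch (not part of the original header): each stress control above
    // occupies a disjoint bit-field of lsraStressMask, so one COMPlus_JitStressRegs value
    // can combine several controls. Hypothetical example:
    //
    //     unsigned mask = LSRA_LIMIT_CALLEE | LSRA_SELECT_REVERSE_HEURISTICS; // 0x1 | 0x04 = 0x05
    //     (LsraStressLimitRegs)(mask & LSRA_LIMIT_MASK)  == LSRA_LIMIT_CALLEE
    //     (LsraSelect)(mask & LSRA_SELECT_MASK)          == LSRA_SELECT_REVERSE_HEURISTICS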
    // This controls whether RefPositions that lower/codegen indicated as reg-optional are
    // allocated a reg at all.
    enum LsraRegOptionalControl
    {
        LSRA_REG_OPTIONAL_DEFAULT  = 0,
        LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000,
        LSRA_REG_OPTIONAL_MASK     = 0x1000
    };

    LsraRegOptionalControl getLsraRegOptionalControl()
    {
        return (LsraRegOptionalControl)(lsraStressMask & LSRA_REG_OPTIONAL_MASK);
    }

    bool regOptionalNoAlloc()
    {
        return getLsraRegOptionalControl() == LSRA_REG_OPTIONAL_NO_ALLOC;
    }

    bool candidatesAreStressLimited()
    {
        return ((lsraStressMask & (LSRA_LIMIT_MASK | LSRA_SELECT_MASK)) != 0);
    }

    // Dump support
    void dumpDefList();
    void lsraDumpIntervals(const char* msg);
    void dumpRefPositions(const char* msg);
    void dumpVarRefPositions(const char* msg);

    // Checking code
    static bool IsLsraAdded(GenTree* node)
    {
        return ((node->gtDebugFlags & GTF_DEBUG_NODE_LSRA_ADDED) != 0);
    }
    static void SetLsraAdded(GenTree* node)
    {
        node->gtDebugFlags |= GTF_DEBUG_NODE_LSRA_ADDED;
    }
    static bool IsResolutionMove(GenTree* node);
    static bool IsResolutionNode(LIR::Range& containingRange, GenTree* node);

    void verifyFinalAllocation();
    void verifyResolutionMove(GenTree* resolutionNode, LsraLocation currentLocation);
#else  // !DEBUG
    bool doSelectNearest()
    {
        return false;
    }
    bool extendLifetimes()
    {
        return false;
    }
    bool spillAlways()
    {
        return false;
    }
    // In a retail build we support only the default traversal order
    bool isTraversalLayoutOrder()
    {
        return false;
    }
    bool isTraversalPredFirstOrder()
    {
        return true;
    }
    bool getLsraExtendLifeTimes()
    {
        return false;
    }
    static void SetLsraAdded(GenTree* node)
    {
        // do nothing; checked only under #DEBUG
    }
    bool candidatesAreStressLimited()
    {
        return false;
    }
#endif // !DEBUG

public:
    // Used by Lowering when considering whether to split Longs, as well as by identifyCandidates().
    bool isRegCandidate(LclVarDsc* varDsc);

    bool isContainableMemoryOp(GenTree* node);

private:
    // Determine which locals are candidates for allocation
    void identifyCandidates();

    // determine which locals are used in EH constructs we don't want to deal with
    void identifyCandidatesExceptionDataflow();

    void buildPhysRegRecords();

#ifdef DEBUG
    void checkLastUses(BasicBlock* block);
    int  ComputeOperandDstCount(GenTree* operand);
    int  ComputeAvailableSrcCount(GenTree* node);
#endif // DEBUG

    void setFrameType();

    // Update allocations at start/end of block
    void unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap);
    void processBlockEndAllocation(BasicBlock* current);

    // Record variable locations at start/end of block
    void processBlockStartLocations(BasicBlock* current);
    void processBlockEndLocations(BasicBlock* current);

#ifdef TARGET_ARM
    bool isSecondHalfReg(RegRecord* regRec, Interval* interval);
    RegRecord* getSecondHalfRegRec(RegRecord* regRec);
    RegRecord* findAnotherHalfRegRec(RegRecord* regRec);
    regNumber findAnotherHalfRegNum(regNumber regNum);
    bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation);
    void unassignDoublePhysReg(RegRecord* doubleRegRecord);
#endif
    void updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType);
    void updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType);
    bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval);
    bool isAssignedToInterval(Interval* interval, RegRecord* regRec);
    bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation);
    bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation);
    weight_t getSpillWeight(RegRecord* physRegRecord);

    // insert refpositions representing prolog zero-inits which will be added later
    void insertZeroInitRefPositions();

    // add physreg refpositions for a tree node, based on calling convention and instruction selection predictions
    void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse);

    void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition);

    void buildRefPositionsForNode(GenTree* tree, LsraLocation loc);

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    void buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet);
    void buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

#if defined(UNIX_AMD64_ABI)
    // For AMD64 on SystemV machines. This method
    // is called as replacement for raUpdateRegStateForArg
    // that is used on Windows. On System V systems a struct can be passed
    // partially using registers from the 2 register files.
    void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc);
#endif // defined(UNIX_AMD64_ABI)

    // Update reg state for an incoming register argument
    void updateRegStateForArg(LclVarDsc* argDsc);

    inline bool isCandidateLocalRef(GenTree* tree)
    {
        if (tree->IsLocal())
        {
            const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon());
            return isCandidateVar(varDsc);
        }
        return false;
    }

    // Helpers for getKillSetForNode().
    regMaskTP getKillSetForStoreInd(GenTreeStoreInd* tree);
    regMaskTP getKillSetForShiftRotate(GenTreeOp* tree);
    regMaskTP getKillSetForMul(GenTreeOp* tree);
    regMaskTP getKillSetForCall(GenTreeCall* call);
    regMaskTP getKillSetForModDiv(GenTreeOp* tree);
    regMaskTP getKillSetForBlockStore(GenTreeBlk* blkNode);
    regMaskTP getKillSetForReturn();
    regMaskTP getKillSetForProfilerHook();
#ifdef FEATURE_HW_INTRINSICS
    regMaskTP getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node);
#endif // FEATURE_HW_INTRINSICS

    // Return the registers killed by the given tree node.
    // This is used only for an assert, and for stress, so it is only defined under DEBUG.
    // Otherwise, the Build methods should obtain the killMask from the appropriate method above.
#ifdef DEBUG
    regMaskTP getKillSetForNode(GenTree* tree);
#endif

    // Given some tree node add refpositions for all the registers this node kills
    bool buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask);
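    // Illustrative usage sketch (not part of the original header): how a Build* method
    // hypothetically pairs the kill-set queries with the position builder above:
    //
    //     regMaskTP killMask = getKillSetForCall(call);
    //     buildKillPositionsForNode(call, currentLoc, killMask); // adds RefTypeKill positions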
    regMaskTP allRegs(RegisterType rt);
    regMaskTP allByteRegs();
    regMaskTP allSIMDRegs();
    regMaskTP internalFloatRegCandidates();

    void makeRegisterInactive(RegRecord* physRegRecord);
    void freeRegister(RegRecord* physRegRecord);
    void freeRegisters(regMaskTP regsToFree);

    // Get the type that this tree defines.
    var_types getDefType(GenTree* tree)
    {
        var_types type = tree->TypeGet();
        if (type == TYP_STRUCT)
        {
            assert(tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR));
            GenTreeLclVar* lclVar = tree->AsLclVar();
            LclVarDsc*     varDsc = compiler->lvaGetDesc(lclVar);
            type                  = varDsc->GetRegisterType(lclVar);
        }
        assert(type != TYP_UNDEF && type != TYP_STRUCT);
        return type;
    }

    // Managing internal registers during the BuildNode process.
    RefPosition* defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP candidates);
    RefPosition* buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE);
    RefPosition* buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE);
    void buildInternalRegisterUses();

    void writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg);
    void resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition);

    void insertMove(BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber inReg, regNumber outReg);

    void insertSwap(
        BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2);

private:
    Interval* newInterval(RegisterType regType);

    Interval* getIntervalForLocalVar(unsigned varIndex)
    {
        assert(varIndex < compiler->lvaTrackedCount);
        assert(localVarIntervals[varIndex] != nullptr);
        return localVarIntervals[varIndex];
    }

    Interval* getIntervalForLocalVarNode(GenTreeLclVarCommon* tree)
    {
        const LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
        assert(varDsc->lvTracked);
        return getIntervalForLocalVar(varDsc->lvVarIndex);
    }

    RegRecord* getRegisterRecord(regNumber regNum);

    RefPosition* newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType);

    RefPosition* newRefPosition(Interval*    theInterval,
                                LsraLocation theLocation,
                                RefType      theRefType,
                                GenTree*     theTreeNode,
                                regMaskTP    mask,
                                unsigned     multiRegIdx = 0);

    // This creates a RefTypeUse at currentLoc. It sets the treeNode to nullptr if it is not a
    // lclVar interval.
    RefPosition* newUseRefPosition(Interval* theInterval, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0);

    RefPosition* newRefPosition(
        regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask);

    void applyCalleeSaveHeuristics(RefPosition* rp);

    void checkConflictingDefUse(RefPosition* rp);

    void associateRefPosWithInterval(RefPosition* rp);

    weight_t getWeight(RefPosition* refPos);

    /*****************************************************************************
     * Register management
     ****************************************************************************/
    RegisterType getRegisterType(Interval* currentInterval, RefPosition* refPosition);

#ifdef DEBUG
    const char* getScoreName(RegisterScore score);
#endif
    regNumber allocateReg(Interval* current, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore));
    regNumber assignCopyReg(RefPosition* refPosition);

    bool isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition);
    bool isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord);
    void checkAndAssignInterval(RegRecord* regRec, Interval* interval);
    void assignPhysReg(RegRecord* regRec, Interval* interval);
    void assignPhysReg(regNumber reg, Interval* interval)
    {
        assignPhysReg(getRegisterRecord(reg), interval);
    }

    bool isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType));
    void checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition);
    void unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType));
    void unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition);
    void unassignPhysRegNoSpill(RegRecord* reg);
    void unassignPhysReg(regNumber reg)
    {
        unassignPhysReg(getRegisterRecord(reg), nullptr);
    }

    void setIntervalAsSpilled(Interval* interval);
    void setIntervalAsSplit(Interval* interval);
    void spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* toRefPosition));

    void spillGCRefs(RefPosition* killRefPosition);

    /*****************************************************************************
     * Register selection
     ****************************************************************************/
    regMaskTP getFreeCandidates(regMaskTP candidates, var_types regType)
    {
        regMaskTP result = candidates & m_AvailableRegs;
#ifdef TARGET_ARM
        // For TYP_DOUBLE on ARM, we can only use a register for which the odd half is
        // also available.
        if (regType == TYP_DOUBLE)
        {
            result &= (m_AvailableRegs >> 1);
        }
#endif // TARGET_ARM
        return result;
    }

#ifdef DEBUG
    class RegisterSelection;
    // For lsra ordering experimentation
    typedef void (LinearScan::RegisterSelection::*HeuristicFn)();
    typedef JitHashTable<RegisterScore, JitSmallPrimitiveKeyFuncs<RegisterScore>, HeuristicFn> ScoreMappingTable;
#define REGSELECT_HEURISTIC_COUNT 17
#endif

    class RegisterSelection
    {
    public:
        RegisterSelection(LinearScan* linearScan);

        // Perform register selection and update currentInterval or refPosition
        FORCEINLINE regMaskTP select(Interval*    currentInterval,
                                     RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore));

        // If the register is from unassigned set such that it was not already
        // assigned to the current interval
        FORCEINLINE bool foundUnassignedReg()
        {
            assert(found && isSingleRegister(foundRegBit));
            bool isUnassignedReg = ((foundRegBit & unassignedSet) != RBM_NONE);
            return isUnassignedReg && !isAlreadyAssigned();
        }

        // Did register selector decide to spill this interval
        FORCEINLINE bool isSpilling()
        {
            return (foundRegBit & freeCandidates) == RBM_NONE;
        }

        // Is the value one of the constants that is already in a register
        FORCEINLINE bool isMatchingConstant()
        {
            assert(found && isSingleRegister(foundRegBit));
            return (matchingConstants & foundRegBit) != RBM_NONE;
        }

        // Did we apply CONST_AVAILABLE heuristics
        FORCEINLINE bool isConstAvailable()
        {
            return (score & CONST_AVAILABLE) != 0;
        }
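        // Illustrative usage sketch (not part of the original header): how allocateReg
        // hypothetically consumes a selection:
        //
        //     regMaskTP foundRegBit = regSelector->select(interval, refPos DEBUG_ARG(&score));
        //     if (foundRegBit != RBM_NONE)
        //     {
        //         regNumber reg       = genRegNumFromMask(foundRegBit); // a single bit
        //         bool      mustSpill = regSelector->isSpilling();      // not in the free set
        //     }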
    private:
#ifdef DEBUG
        RegisterScore      RegSelectionOrder[REGSELECT_HEURISTIC_COUNT] = {NONE};
        ScoreMappingTable* mappingTable                                 = nullptr;
#endif
        LinearScan*  linearScan      = nullptr;
        int          score           = 0;
        Interval*    currentInterval = nullptr;
        RefPosition* refPosition     = nullptr;

        RegisterType regType = RegisterType::TYP_UNKNOWN;

        LsraLocation currentLocation = MinLocation;
        RefPosition* nextRefPos      = nullptr;

        regMaskTP candidates;
        regMaskTP preferences     = RBM_NONE;
        Interval* relatedInterval = nullptr;

        regMaskTP    relatedPreferences = RBM_NONE;
        LsraLocation rangeEndLocation;
        LsraLocation relatedLastLocation;
        bool         preferCalleeSave = false;
        RefPosition* rangeEndRefPosition;
        RefPosition* lastRefPosition;
        regMaskTP    callerCalleePrefs = RBM_NONE;
        LsraLocation lastLocation;
        RegRecord*   prevRegRec = nullptr;

        regMaskTP prevRegBit = RBM_NONE;

        // These are used in the post-selection updates, and must be set for any selection.
        regMaskTP freeCandidates;
        regMaskTP matchingConstants;
        regMaskTP unassignedSet;
        regMaskTP foundRegBit;

        // Compute the sets for COVERS, OWN_PREFERENCE, COVERS_RELATED, COVERS_FULL and UNASSIGNED together,
        // as they all require similar computation.
        regMaskTP coversSet;
        regMaskTP preferenceSet;
        regMaskTP coversRelatedSet;
        regMaskTP coversFullSet;
        bool      coversSetsCalculated = false;
        bool      found                = false;
        bool      skipAllocation       = false;
        regNumber foundReg             = REG_NA;

        // If the selected register is already assigned to the current interval
        FORCEINLINE bool isAlreadyAssigned()
        {
            assert(found && isSingleRegister(candidates));
            return (prevRegBit & preferences) == foundRegBit;
        }

        bool applySelection(int selectionScore, regMaskTP selectionCandidates);
        bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate);
        FORCEINLINE void calculateCoversSets();
        FORCEINLINE void reset(Interval* interval, RefPosition* refPosition);

#define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat();
#include "lsra_score.h"
#undef REG_SEL_DEF
    };

    RegisterSelection* regSelector;

    /*****************************************************************************
     * For Resolution phase
     ****************************************************************************/
    // TODO-Throughput: Consider refactoring this so that we keep a map from regs to vars for better scaling
    unsigned int regMapCount;

    // When we split edges, we create new blocks, and instead of expanding the VarToRegMaps, we
    // rely on the property that the "in" map is the same as the "from" block of the edge, and the
    // "out" map is the same as the "to" block of the edge (by construction).
    // So, for any block whose bbNum is greater than bbNumMaxBeforeResolution, we use the
    // splitBBNumToTargetBBNumMap.
    // TODO-Throughput: We may want to look into the cost/benefit tradeoff of doing this vs. expanding
    // the arrays.

    unsigned bbNumMaxBeforeResolution;
    struct SplitEdgeInfo
    {
        unsigned fromBBNum;
        unsigned toBBNum;
    };
    typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo> SplitBBNumToTargetBBNumMap;
    SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap;
    SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap()
    {
        if (splitBBNumToTargetBBNumMap == nullptr)
        {
            splitBBNumToTargetBBNumMap =
                new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler));
        }
        return splitBBNumToTargetBBNumMap;
    }
    SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum);

    void initVarRegMaps();
    void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
    void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
    VarToRegMap getInVarToRegMap(unsigned int bbNum);
    VarToRegMap getOutVarToRegMap(unsigned int bbNum);
    void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg);
    regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex);
    // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
    // the block)
    VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap);

    regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type);

#ifdef DEBUG
    void dumpVarToRegMap(VarToRegMap map);
    void dumpInVarToRegMap(BasicBlock* block);
    void dumpOutVarToRegMap(BasicBlock* block);

    // There are three points at which a tuple-style dump is produced, and each
    // differs slightly:
    //   - In LSRA_DUMP_PRE, it does a simple dump of each node, with indications of what
    //     tree nodes are consumed.
    //   - In LSRA_DUMP_REFPOS, which is after the intervals are built, but before
    //     register allocation, each node is dumped, along with all of the RefPositions.
    //     The Intervals are identified as Lnnn for lclVar intervals, Innn for other
    //     intervals, and Tnnn for internal temps.
    //   - In LSRA_DUMP_POST, which is after register allocation, the registers are
    //     shown.
    enum LsraTupleDumpMode
    {
        LSRA_DUMP_PRE,
        LSRA_DUMP_REFPOS,
        LSRA_DUMP_POST
    };
    void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength);
    void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest);
    void DumpOperandDefs(
        GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength);
    void TupleStyleDump(LsraTupleDumpMode mode);

    LsraLocation maxNodeLocation;

    // Width of various fields - used to create a streamlined dump during allocation that shows the
    // state of all the registers in columns.
    int regColumnWidth;
    int regTableIndent;

    const char* columnSeparator;
    const char* line;
    const char* leftBox;
    const char* middleBox;
    const char* rightBox;

    static const int MAX_FORMAT_CHARS = 12;
    char intervalNameFormat[MAX_FORMAT_CHARS];
    char regNameFormat[MAX_FORMAT_CHARS];
    char shortRefPositionFormat[MAX_FORMAT_CHARS];
    char emptyRefPositionFormat[MAX_FORMAT_CHARS];
    char indentFormat[MAX_FORMAT_CHARS];
    static const int MAX_LEGEND_FORMAT_CHARS = 25;
    char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS];
    char legendFormat[MAX_LEGEND_FORMAT_CHARS];

    // How many rows have we printed since last printing a "title row"?
    static const int MAX_ROWS_BETWEEN_TITLES = 50;
    int              rowCountSinceLastTitle;
    // Current mask of registers being printed in the dump.
    regMaskTP lastDumpedRegisters;
    regMaskTP registersToDump;
    int       lastUsedRegNumIndex;
    bool shouldDumpReg(regNumber regNum)
    {
        return (registersToDump & genRegMask(regNum)) != 0;
    }

    void dumpRegRecordHeader();
    void dumpRegRecordTitle();
    void dumpRegRecordTitleIfNeeded();
    void dumpRegRecordTitleLines();
    void dumpRegRecords();
    void dumpNewBlock(BasicBlock* currentBlock, LsraLocation location);
    // An abbreviated RefPosition dump for printing with column-based register state
    void dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock);
    // Print the number of spaces occupied by a dumpRefPositionShort()
    void dumpEmptyRefPosition();
    // A dump of Referent, in exactly regColumnWidth characters
    void dumpIntervalName(Interval* interval);

    // Events during the allocation phase that cause some dump output
    enum LsraDumpEvent
    {
        // Conflicting def/use
        LSRA_EVENT_DEFUSE_CONFLICT,
        LSRA_EVENT_DEFUSE_FIXED_DELAY_USE,
        LSRA_EVENT_DEFUSE_CASE1,
        LSRA_EVENT_DEFUSE_CASE2,
        LSRA_EVENT_DEFUSE_CASE3,
        LSRA_EVENT_DEFUSE_CASE4,
        LSRA_EVENT_DEFUSE_CASE5,
        LSRA_EVENT_DEFUSE_CASE6,

        // Spilling
        LSRA_EVENT_SPILL,
        LSRA_EVENT_SPILL_EXTENDED_LIFETIME,
        LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL,
        LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL,
        LSRA_EVENT_DONE_KILL_GC_REFS,
        LSRA_EVENT_NO_GC_KILLS,

        // Block boundaries
        LSRA_EVENT_START_BB,
        LSRA_EVENT_END_BB,

        // Miscellaneous
        LSRA_EVENT_FREE_REGS,
        LSRA_EVENT_UPPER_VECTOR_SAVE,
        LSRA_EVENT_UPPER_VECTOR_RESTORE,

        // Characteristics of the current RefPosition
        LSRA_EVENT_INCREMENT_RANGE_END, // ???
        LSRA_EVENT_LAST_USE,
        LSRA_EVENT_LAST_USE_DELAYED,
        LSRA_EVENT_NEEDS_NEW_REG,

        // Allocation decisions
        LSRA_EVENT_FIXED_REG,
        LSRA_EVENT_EXP_USE,
        LSRA_EVENT_ZERO_REF,
        LSRA_EVENT_NO_ENTRY_REG_ALLOCATED,
        LSRA_EVENT_KEPT_ALLOCATION,
        LSRA_EVENT_COPY_REG,
        LSRA_EVENT_MOVE_REG,
        LSRA_EVENT_ALLOC_REG,
        LSRA_EVENT_NO_REG_ALLOCATED,
        LSRA_EVENT_RELOAD,
        LSRA_EVENT_SPECIAL_PUTARG,
        LSRA_EVENT_REUSE_REG,
    };
    void dumpLsraAllocationEvent(LsraDumpEvent event,
                                 Interval*     interval      = nullptr,
                                 regNumber     reg           = REG_NA,
                                 BasicBlock*   currentBlock  = nullptr,
                                 RegisterScore registerScore = NONE);

    void validateIntervals();
#endif // DEBUG

#if TRACK_LSRA_STATS
    unsigned regCandidateVarCount;
    void updateLsraStat(LsraStat stat, unsigned currentBBNum);
    void dumpLsraStats(FILE* file);
    LsraStat getLsraStatFromScore(RegisterScore registerScore);
    LsraStat firstRegSelStat = STAT_FREE;

public:
    virtual void dumpLsraStatsCsv(FILE* file);
    virtual void dumpLsraStatsSummary(FILE* file);
    static const char* getStatName(unsigned stat);

#define INTRACK_STATS(x) x
#define INTRACK_STATS_IF(condition, work) \
    if (condition)                        \
    {                                     \
        work;                             \
    }
#else // !TRACK_LSRA_STATS
#define INTRACK_STATS(x)
#define INTRACK_STATS_IF(condition, work)
#endif // !TRACK_LSRA_STATS

private:
    Compiler* compiler;

    CompAllocator getAllocator(Compiler* comp)
    {
        return comp->getAllocator(CMK_LSRA);
    }

#ifdef DEBUG
    // This is used for dumping
    RefPosition* activeRefPosition;
#endif // DEBUG

    IntervalList intervals;

    RegRecord physRegs[REG_COUNT];

    // Map from tracked variable index to Interval*.
    Interval** localVarIntervals;

    // Set of blocks that have been visited.
    BlockSet bbVisitedSet;
    void markBlockVisited(BasicBlock* block)
    {
        BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum);
    }
    void clearVisitedBlocks()
    {
        BlockSetOps::ClearD(compiler, bbVisitedSet);
    }
    bool isBlockVisited(BasicBlock* block)
    {
        return BlockSetOps::IsMember(compiler, bbVisitedSet, block->bbNum);
    }

#if DOUBLE_ALIGN
    bool doDoubleAlign;
#endif

    // A map from bbNum to the block information used during register allocation.
    LsraBlockInfo* blockInfo;

    BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated));

    // The order in which the blocks will be allocated.
    // This is an array of BasicBlock*, in the order in which they should be traversed.
    BasicBlock** blockSequence;
    // The verifiedAllBBs flag indicates whether we have verified that all BBs have been
    // included in the blockSequence above, during setBlockSequence().
    bool verifiedAllBBs;
    void setBlockSequence();
    int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights);
    BasicBlockList* blockSequenceWorkList;
    bool            blockSequencingDone;
#ifdef DEBUG
    // LSRA must not change the number of blocks and the blockEpoch that it initializes at start.
    unsigned blockEpoch;
#endif // DEBUG
    void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet);
    void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode);
    BasicBlock* getNextCandidateFromWorkList();

    // Indicates whether the allocation pass has been completed.
    bool allocationPassComplete;

    // The bbNum of the block being currently allocated or resolved.
    unsigned int curBBNum;

    // The current location
    LsraLocation currentLoc;

    // The first location in a cold or funclet block.
    LsraLocation firstColdLoc;

    // The ordinal of the block we're on (i.e. this is the curBBSeqNum-th block we've allocated).
    unsigned int curBBSeqNum;

    // The number of blocks that we've sequenced.
    unsigned int bbSeqCount;

    // The Location of the start of the current block.
    LsraLocation curBBStartLocation;

    // True if the method contains any critical edges.
    bool hasCriticalEdges;

    // True if there are any register candidate lclVars available for allocation.
    bool enregisterLocalVars;

    virtual bool willEnregisterLocalVars() const
    {
        return enregisterLocalVars;
    }

    // Ordered list of RefPositions
    RefPositionList refPositions;

    // Per-block variable location mappings: an array indexed by block number that yields a
    // pointer to an array of regNumber, one per variable.
    VarToRegMap* inVarToRegMaps;
    VarToRegMap* outVarToRegMaps;

    // A temporary VarToRegMap used during the resolution of critical edges.
    VarToRegMap sharedCriticalVarToRegMap;

    PhasedVar<regMaskTP> availableIntRegs;
    PhasedVar<regMaskTP> availableFloatRegs;
    PhasedVar<regMaskTP> availableDoubleRegs;

    // The set of all register candidates. Note that this may be a subset of tracked vars.
    VARSET_TP registerCandidateVars;
    // Current set of live register candidate vars, used during building of RefPositions to determine
    // whether to preference to callee-save.
    VARSET_TP currentLiveVars;
    // Set of variables that may require resolution across an edge.
    // This is first constructed during interval building, to contain all the lclVars that are live at BB edges.
    // Then, any lclVar that is always in the same register is removed from the set.
    VARSET_TP resolutionCandidateVars;
    // This set contains all the lclVars that are ever spilled or split.
    VARSET_TP splitOrSpilledVars;
    // Set of floating point variables to consider for callee-save registers.
    VARSET_TP fpCalleeSaveCandidateVars;
    // Set of variables exposed on EH flow edges.
    VARSET_TP exceptVars;
    // Set of variables exposed on finally edges. These must be zero-init if they are refs or if compInitMem is true.
    VARSET_TP finallyVars;

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
    static const var_types LargeVectorSaveType = TYP_SIMD16;
#elif defined(TARGET_ARM64)
    static const var_types LargeVectorSaveType = TYP_DOUBLE;
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
    // Set of large vector (TYP_SIMD32 on AVX) variables.
    VARSET_TP largeVectorVars;
    // Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers.
    VARSET_TP largeVectorCalleeSaveCandidateVars;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

    //-----------------------------------------------------------------------
    // Register status
    //-----------------------------------------------------------------------

    regMaskTP m_AvailableRegs;
    regNumber getRegForType(regNumber reg, var_types regType)
    {
#ifdef TARGET_ARM
        if ((regType == TYP_DOUBLE) && !genIsValidDoubleReg(reg))
        {
            reg = REG_PREV(reg);
        }
#endif // TARGET_ARM
        return reg;
    }

    regMaskTP getRegMask(regNumber reg, var_types regType)
    {
        reg               = getRegForType(reg, regType);
        regMaskTP regMask = genRegMask(reg);
#ifdef TARGET_ARM
        if (regType == TYP_DOUBLE)
        {
            assert(genIsValidDoubleReg(reg));
            regMask |= (regMask << 1);
        }
#endif // TARGET_ARM
        return regMask;
    }

    void resetAvailableRegs()
    {
        m_AvailableRegs          = (availableIntRegs | availableFloatRegs);
        m_RegistersWithConstants = RBM_NONE;
    }

    bool isRegAvailable(regNumber reg, var_types regType)
    {
        regMaskTP regMask = getRegMask(reg, regType);
        return (m_AvailableRegs & regMask) == regMask;
    }
    void setRegsInUse(regMaskTP regMask)
    {
        m_AvailableRegs &= ~regMask;
    }
    void setRegInUse(regNumber reg, var_types regType)
    {
        regMaskTP regMask = getRegMask(reg, regType);
        setRegsInUse(regMask);
    }
    void makeRegsAvailable(regMaskTP regMask)
    {
        m_AvailableRegs |= regMask;
    }
    void makeRegAvailable(regNumber reg, var_types regType)
    {
        regMaskTP regMask = getRegMask(reg, regType);
        makeRegsAvailable(regMask);
    }

    void clearNextIntervalRef(regNumber reg, var_types regType);
    void updateNextIntervalRef(regNumber reg, Interval* interval);

    void clearSpillCost(regNumber reg, var_types regType);
    void updateSpillCost(regNumber reg, Interval* interval);

    regMaskTP m_RegistersWithConstants;
    void clearConstantReg(regNumber reg, var_types regType)
    {
        m_RegistersWithConstants &= ~getRegMask(reg, regType);
    }
    void setConstantReg(regNumber reg, var_types regType)
    {
        m_RegistersWithConstants |= getRegMask(reg, regType);
    }
    bool isRegConstant(regNumber reg, var_types regType)
    {
        reg               = getRegForType(reg, regType);
        regMaskTP regMask = getRegMask(reg, regType);
        return (m_RegistersWithConstants & regMask) == regMask;
    }
    regMaskTP getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition);

    regMaskTP    fixedRegs;
    LsraLocation nextFixedRef[REG_COUNT];
    void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition);
    LsraLocation getNextFixedRef(regNumber regNum, var_types regType)
    {
        LsraLocation loc = nextFixedRef[regNum];
#ifdef TARGET_ARM
        if (regType == TYP_DOUBLE)
        {
            loc = Min(loc, nextFixedRef[regNum + 1]);
        }
#endif
        return loc;
    }

    LsraLocation nextIntervalRef[REG_COUNT];
    LsraLocation getNextIntervalRef(regNumber regNum, var_types regType)
    {
        LsraLocation loc = nextIntervalRef[regNum];
#ifdef TARGET_ARM
        if (regType == TYP_DOUBLE)
        {
            loc = Min(loc, nextIntervalRef[regNum + 1]);
        }
#endif
        return loc;
    }

    weight_t spillCost[REG_COUNT];

    regMaskTP regsBusyUntilKill;
    regMaskTP regsInUseThisLocation;
    regMaskTP regsInUseNextLocation;
    bool isRegBusy(regNumber reg, var_types regType)
    {
        regMaskTP regMask = getRegMask(reg, regType);
        return (regsBusyUntilKill & regMask) != RBM_NONE;
    }
    void setRegBusyUntilKill(regNumber reg, var_types regType)
    {
        regsBusyUntilKill |= getRegMask(reg, regType);
    }
    void clearRegBusyUntilKill(regNumber reg)
    {
        regsBusyUntilKill &= ~genRegMask(reg);
    }

    bool isRegInUse(regNumber reg, var_types regType)
    {
        regMaskTP regMask = getRegMask(reg, regType);
        return (regsInUseThisLocation & regMask) != RBM_NONE;
    }

    void resetRegState()
    {
        resetAvailableRegs();
        regsBusyUntilKill =
RBM_NONE; } bool conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition); // This method should not be used and is here to retain old behavior. // It should be replaced by isRegAvailable(). // See comment in allocateReg(); bool isFree(RegRecord* regRecord); //----------------------------------------------------------------------- // Build methods //----------------------------------------------------------------------- // The listNodePool is used to maintain the RefInfo for nodes that are "in flight" // i.e. whose consuming node has not yet been handled. RefInfoListNodePool listNodePool; // When Def RefPositions are built for a node, their RefInfoListNode // (GenTree* to RefPosition* mapping) is placed in the defList. // As the consuming node is handled, it removes the RefInfoListNode from the // defList, use the interval associated with the corresponding Def RefPosition and // use it to build the Use RefPosition. RefInfoList defList; // As we build uses, we may want to preference the next definition (i.e. the register produced // by the current node) to the same register as one of its uses. This is done by setting // 'tgtPrefUse' to that RefPosition. RefPosition* tgtPrefUse = nullptr; RefPosition* tgtPrefUse2 = nullptr; // The following keep track of information about internal (temporary register) intervals // during the building of a single node. static const int MaxInternalCount = 5; RefPosition* internalDefs[MaxInternalCount]; int internalCount = 0; bool setInternalRegsDelayFree; // When a RefTypeUse is marked as 'delayRegFree', we also want to mark the RefTypeDef // in the next Location as 'hasInterferingUses'. This is accomplished by setting this // 'pendingDelayFree' to true as they are created, and clearing it as a new node is // handled in 'BuildNode'. bool pendingDelayFree; // This method clears the "build state" before starting to handle a new node. void clearBuildState() { tgtPrefUse = nullptr; tgtPrefUse2 = nullptr; internalCount = 0; setInternalRegsDelayFree = false; pendingDelayFree = false; } bool isCandidateMultiRegLclVar(GenTreeLclVar* lclNode); bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); void setDelayFree(RefPosition* use); int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); #ifdef TARGET_XARCH int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); #endif // !TARGET_XARCH // This is the main entry point for building the RefPositions for a node. // These methods return the number of sources. 
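// A standalone sketch (not JIT code) of the defList handshake described above: a def is
// queued when its node is built, and the consuming node removes its operand's entry to
// build the matching use. The names SketchRef and SketchDefList are hypothetical
// stand-ins for the RefInfoListNode machinery; the Build* entry points declared next
// consume the real list.

#include <cassert>
#include <list>

struct SketchRef
{
    int nodeId;   // stands in for the GenTree* that produced the def
    int position; // stands in for the RefPosition*
};

struct SketchDefList
{
    std::list<SketchRef> defs;

    // Called when a node's def RefPosition is built.
    void addDef(int nodeId, int position)
    {
        defs.push_back({nodeId, position});
    }

    // Called when the consuming node is handled: find and remove the operand's def.
    SketchRef removeDef(int nodeId)
    {
        for (auto it = defs.begin(); it != defs.end(); ++it)
        {
            if (it->nodeId == nodeId)
            {
                SketchRef r = *it;
                defs.erase(it);
                return r;
            }
        }
        assert(!"operand def not found");
        return {};
    }
};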
int BuildNode(GenTree* tree); void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); int BuildSimple(GenTree* tree); int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); int BuildDelayFreeUses(GenTree* node, GenTree* rmwNode = nullptr, regMaskTP candidates = RBM_NONE); int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH // This method, unlike the others, returns the number of sources, since it may be called when // 'tree' is contained. int BuildShiftRotate(GenTree* tree); #endif // TARGET_XARCH #ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif int BuildPutArgReg(GenTreeUnOp* node); int BuildCall(GenTreeCall* call); int BuildCmp(GenTree* tree); int BuildBlockStore(GenTreeBlk* blkNode); int BuildModDiv(GenTree* tree); int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); int BuildStoreLoc(GenTreeLclVarCommon* tree); int BuildIndir(GenTreeIndir* indirTree); int BuildGCWriteBarrier(GenTree* tree); int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) #if defined(TARGET_X86) // Move the last use bit, if any, from 'fromTree' to 'toTree'; 'fromTree' must be contained. void CheckAndMoveRMWLastUse(GenTree* fromTree, GenTree* toTree) { // If 'fromTree' is not a last-use lclVar, there's nothing to do. if ((fromTree == nullptr) || !fromTree->OperIs(GT_LCL_VAR) || ((fromTree->gtFlags & GTF_VAR_DEATH) == 0)) { return; } // If 'fromTree' was a lclVar, it must be contained and 'toTree' must match. if (!fromTree->isContained() || (toTree == nullptr) || !toTree->OperIs(GT_LCL_VAR) || (fromTree->AsLclVarCommon()->GetLclNum() != toTree->AsLclVarCommon()->GetLclNum())) { assert(!"Unmatched RMW indirections"); return; } // This is probably not necessary, but keeps things consistent. fromTree->gtFlags &= ~GTF_VAR_DEATH; toTree->gtFlags |= GTF_VAR_DEATH; } #endif // TARGET_X86 #ifdef FEATURE_SIMD int BuildSIMD(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #endif // FEATURE_HW_INTRINSICS int BuildPutArgStk(GenTreePutArgStk* argNode); #if FEATURE_ARG_SPLIT int BuildPutArgSplit(GenTreePutArgSplit* tree); #endif // FEATURE_ARG_SPLIT int BuildLclHeap(GenTree* tree); }; /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval XX XX XX XX This is the fundamental data structure for linear scan register XX XX allocation. It represents the live range(s) for a variable or temp. 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ class Interval : public Referenceable { public: Interval(RegisterType registerType, regMaskTP registerPreferences) : registerPreferences(registerPreferences) , relatedInterval(nullptr) , assignedReg(nullptr) , varNum(0) , physReg(REG_COUNT) , registerType(registerType) , isActive(false) , isLocalVar(false) , isSplit(false) , isSpilled(false) , isInternal(false) , isStructField(false) , isPromotedStruct(false) , hasConflictingDefUse(false) , hasInterferingUses(false) , isSpecialPutArg(false) , preferCalleeSave(false) , isConstant(false) #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE , isUpperVector(false) , isPartiallySpilled(false) #endif , isWriteThru(false) , isSingleDef(false) #ifdef DEBUG , intervalIndex(0) #endif { } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); // extremely concise representation void microDump(); #endif // DEBUG void setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l); // Fixed registers for which this Interval has a preference regMaskTP registerPreferences; // The relatedInterval is: // - for any other interval, it is the interval to which this interval // is currently preferenced (e.g. because they are related by a copy) Interval* relatedInterval; // The assignedReg is the RecRecord for the register to which this interval // has been assigned at some point - if the interval is active, this is the // register it currently occupies. RegRecord* assignedReg; unsigned int varNum; // This is the "variable number": the index into the lvaTable array // The register to which it is currently assigned. regNumber physReg; RegisterType registerType; // Is this Interval currently in a register and live? bool isActive; bool isLocalVar : 1; // Indicates whether this interval has been assigned to different registers bool isSplit : 1; // Indicates whether this interval is ever spilled bool isSpilled : 1; // indicates an interval representing the internal requirements for // generating code for a node (temp registers internal to the node) // Note that this interval may live beyond a node in the GT_ARR_LENREF/GT_IND // case (though never lives beyond a stmt) bool isInternal : 1; // true if this is a LocalVar for a struct field bool isStructField : 1; // true iff this is a GT_LDOBJ for a fully promoted (PROMOTION_TYPE_INDEPENDENT) struct bool isPromotedStruct : 1; // true if this is an SDSU interval for which the def and use have conflicting register // requirements bool hasConflictingDefUse : 1; // true if this interval's defining node has "delayRegFree" uses, either due to it being an RMW instruction, // OR because it requires an internal register that differs from the target. bool hasInterferingUses : 1; // True if this interval is defined by a putArg, whose source is a non-last-use lclVar. // During allocation, this flag will be cleared if the source is not already in the required register. // Othewise, we will leave the register allocated to the lclVar, but mark the RegRecord as // isBusyUntilKill, so that it won't be reused if the lclVar goes dead before the call. bool isSpecialPutArg : 1; // True if this interval interferes with a call. bool preferCalleeSave : 1; // True if this interval is defined by a constant node that may be reused and/or may be // able to reuse a constant that's already in a register. 
bool isConstant : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // True if this is a special interval for saving the upper half of a large vector. bool isUpperVector : 1; // This is a convenience method to avoid ifdef's everywhere this is used. bool IsUpperVector() const { return isUpperVector; } // True if this interval has been partially spilled bool isPartiallySpilled : 1; #else bool IsUpperVector() const { return false; } #endif // True if this interval is associated with a lclVar that is written to memory at each definition. bool isWriteThru : 1; // True if this interval has a single definition. bool isSingleDef : 1; #ifdef DEBUG unsigned int intervalIndex; #endif // DEBUG LclVarDsc* getLocalVar(Compiler* comp) { assert(isLocalVar); return comp->lvaGetDesc(this->varNum); } // Get the local tracked variable "index" (lvVarIndex), used in bitmasks. unsigned getVarIndex(Compiler* comp) { LclVarDsc* varDsc = getLocalVar(comp); assert(varDsc->lvTracked); // If this isn't true, we shouldn't be calling this function! return varDsc->lvVarIndex; } bool isAssignedTo(regNumber regNum) { // This uses regMasks to handle the case where a double actually occupies two registers // TODO-Throughput: This could/should be done more cheaply. return (physReg != REG_NA && (genRegMask(physReg, registerType) & genRegMask(regNum)) != RBM_NONE); } // Assign the related interval. void assignRelatedInterval(Interval* newRelatedInterval) { #ifdef DEBUG if (VERBOSE) { printf("Assigning related "); newRelatedInterval->microDump(); printf(" to "); this->microDump(); printf("\n"); } #endif // DEBUG relatedInterval = newRelatedInterval; } // Assign the related interval, but only if it isn't already assigned. bool assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval) { if (relatedInterval == nullptr) { assignRelatedInterval(newRelatedInterval); return true; } else { #ifdef DEBUG if (VERBOSE) { printf("Interval "); this->microDump(); printf(" already has a related interval\n"); } #endif // DEBUG return false; } } // Get the current preferences for this Interval. // Note that when we have an assigned register we don't necessarily update the // registerPreferences to that register, as there may be multiple, possibly disjoint, // definitions. This method will return the current assigned register if any, or // the 'registerPreferences' otherwise. // regMaskTP getCurrentPreferences() { return (assignedReg == nullptr) ? registerPreferences : genRegMask(assignedReg->regNum); } void mergeRegisterPreferences(regMaskTP preferences) { // We require registerPreferences to have been initialized. assert(registerPreferences != RBM_NONE); // It is invalid to update with empty preferences assert(preferences != RBM_NONE); regMaskTP commonPreferences = (registerPreferences & preferences); if (commonPreferences != RBM_NONE) { registerPreferences = commonPreferences; return; } // There are no preferences in common. // Preferences need to reflect both cases where a var must occupy a specific register, // as well as cases where a var is live when a register is killed. // In the former case, we would like to record all such registers, however we don't // really want to use any registers that will interfere. // To approximate this, we never "or" together multi-reg sets, which are generally kill sets. if (!genMaxOneBit(preferences)) { // The new preference value is a multi-reg set, so it's probably a kill. // Keep the new value. 
            registerPreferences = preferences;
            return;
        }
        if (!genMaxOneBit(registerPreferences))
        {
            // The old preference value is a multi-reg set.
            // Keep the existing preference set, as it probably reflects one or more kills.
            // It may have been a union of multiple individual registers, but we can't
            // distinguish that case without extra cost.
            return;
        }
        // If we reach here, we have two disjoint single-reg sets.
        // Keep only the callee-save preferences, if not empty.
        // Otherwise, take the union of the preferences.
        regMaskTP newPreferences = registerPreferences | preferences;
        if (preferCalleeSave)
        {
            regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences));
            if (calleeSaveMask != RBM_NONE)
            {
                newPreferences = calleeSaveMask;
            }
        }
        registerPreferences = newPreferences;
    }

    // Update the registerPreferences on the interval.
    // If there are conflicting requirements on this interval, set the preferences to
    // the union of them. That way maybe we'll get at least one of them.
    // An exception is made in the case where one of the existing or new preference
    // sets is entirely callee-save, in which case we "prefer" the callee-save set.
    void updateRegisterPreferences(regMaskTP preferences)
    {
        // If this interval is preferenced, that interval may have already been assigned a
        // register, and we want to include that in the preferences.
        if ((relatedInterval != nullptr) && !relatedInterval->isActive)
        {
            mergeRegisterPreferences(relatedInterval->getCurrentPreferences());
        }
        // Now merge the new preferences.
        mergeRegisterPreferences(preferences);
    }
};
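// A standalone sketch (not JIT code) of the merge rules implemented by
// mergeRegisterPreferences above, using plain 64-bit masks: keep the intersection
// when non-empty, never "or" in a multi-reg (likely kill) set, otherwise union the
// two single-register preferences. The callee-save refinement is omitted, and the
// register names below are illustrative only.

#include <cstdint>
#include <cstdio>

static bool maxOneBit(uint64_t m)
{
    return (m & (m - 1)) == 0;
}

static uint64_t mergePrefs(uint64_t current, uint64_t incoming)
{
    uint64_t common = current & incoming;
    if (common != 0)
    {
        return common; // non-empty intersection wins
    }
    if (!maxOneBit(incoming))
    {
        return incoming; // multi-reg set: probably a kill, keep the new value
    }
    if (!maxOneBit(current))
    {
        return current; // existing multi-reg set: keep it
    }
    return current | incoming; // two disjoint single regs: take the union
}

int main()
{
    const uint64_t RAX = 1ull << 0, RCX = 1ull << 1, RDX = 1ull << 2;
    printf("%llx\n", (unsigned long long)mergePrefs(RAX | RCX, RCX)); // -> RCX
    printf("%llx\n", (unsigned long long)mergePrefs(RAX, RDX));       // -> RAX|RDX
    return 0;
}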
class RefPosition
{
public:
    // A RefPosition refers to either an Interval or a RegRecord. 'referent' points to one
    // of these types. If it refers to a RegRecord, then 'isPhysRegRef()' is true. If it
    // refers to an Interval, then 'isPhysRegRef()' is false.
    // referent can never be null.
    Referenceable* referent;

    // nextRefPosition is the next in code order.
    // Note that in either case there is no need for these to be doubly linked, as they
    // are only traversed in the forward direction, and are not moved.
    RefPosition* nextRefPosition;

    // The remaining fields are common to both options
    GenTree*     treeNode;
    unsigned int bbNum;
    LsraLocation nodeLocation;

    // Prior to the allocation pass, registerAssignment captures the valid registers
    // for this RefPosition.
    // After the allocation pass, this contains the actual assignment
    regMaskTP registerAssignment;

    RefType refType;

    // NOTE: C++ only packs bitfields if the base type is the same. So make all the base
    // NOTE: types of the logically "bool" types that follow 'unsigned char', so they match
    // NOTE: RefType that precedes this, and multiRegIdx can also match.

    // Indicates whether this ref position is to be allocated a reg only if profitable. Currently these are the
    // ref positions that lower/codegen has indicated as reg optional and is considered a contained memory operand if
    // no reg is allocated.
    unsigned char regOptional : 1;

    // Used by RefTypeDef/Use positions of a multi-reg call node.
    // Indicates the position of the register that this ref position refers to.
    // The max bits needed is based on the max value of MAX_RET_REG_COUNT across
    // all targets, and that happens to be 4, on Arm. Hence the index value
    // would be 0..MAX_RET_REG_COUNT-1.
    unsigned char multiRegIdx : 2;

    // Last Use - this may be true for multiple RefPositions in the same Interval
    unsigned char lastUse : 1;

    // Spill and Copy info
    //   reload indicates that the value was spilled, and must be reloaded here.
    //   spillAfter indicates that the value is spilled here, so a spill must be added.
    //   singleDefSpill indicates that it is associated with a single-def var and if it
    //      is decided to get spilled, it will be spilled at firstRefPosition def. That
    //      way, the value on the stack will always be up-to-date and no more spills or
    //      resolutions (from reg to stack) will be needed for such single-def var.
    //   copyReg indicates that the value needs to be copied to a specific register,
    //      but that it will also retain its current assigned register.
    //   moveReg indicates that the value needs to be moved to a different register,
    //      and that this will be its new assigned register.
    // A RefPosition may have any flag individually or the following combinations:
    //  - reload and spillAfter (i.e. it remains in memory), but not in combination with copyReg or moveReg
    //    (reload cannot exist with copyReg or moveReg; it should be reloaded into the appropriate reg)
    //  - spillAfter and copyReg (i.e. it must be copied to a new reg for use, but is then spilled)
    //  - spillAfter and moveReg (i.e. it must be both spilled and moved)
    //    NOTE: a moveReg involves an explicit move, and would usually not be needed for a fixed Reg if it is going
    //    to be spilled, because the code generator will do the move to the fixed register, and doesn't need to
    //    record the new register location as the new "home" location of the lclVar. However, if there is a conflicting
    //    use at the same location (e.g. lclVar V1 is in rdx and needs to be in rcx, but V2 needs to be in rdx), then
    //    we need an explicit move.
    //  - copyReg and moveReg must not exist with each other.
    unsigned char reload : 1;
    unsigned char spillAfter : 1;
    unsigned char singleDefSpill : 1;
    unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be
                                 // set.
    unsigned char copyReg : 1;
    unsigned char moveReg : 1; // true if this var is moved to a new register

    unsigned char isPhysRegRef : 1; // true if 'referent' points to a RegRecord, false if it points to an Interval
    unsigned char isFixedRegRef : 1;
    unsigned char isLocalDefUse : 1;

    // delayRegFree indicates that the register should not be freed right away, but instead wait
    // until the next Location after it would normally be freed. This is used for the case of
    // non-commutative binary operators, where op2 must not be assigned the same register as
    // the target. We do this by not freeing it until after the target has been defined.
    // Another option would be to actually change the Location of the op2 use until the same
    // Location as the def, but then it could potentially reuse a register that has been freed
    // from the other source(s), e.g. if it's a lastUse or spilled.
    unsigned char delayRegFree : 1;

    // outOfOrder is marked on a (non-def) RefPosition that doesn't follow a definition of the
    // register currently assigned to the Interval. This happens when we use the assigned
    // register from a predecessor that is not the most recently allocated BasicBlock.
    unsigned char outOfOrder : 1;

#ifdef DEBUG
    // Minimum number of registers that needs to be ensured while
    // constraining candidates for this ref position under
    // LSRA stress.
    unsigned minRegCandidateCount;

    // The unique RefPosition number, equal to its index in the
    // refPositions list. Only used for debugging dumps.
unsigned rpNum; #endif // DEBUG RefPosition(unsigned int bbNum, LsraLocation nodeLocation, GenTree* treeNode, RefType refType) : referent(nullptr) , nextRefPosition(nullptr) , treeNode(treeNode) , bbNum(bbNum) , nodeLocation(nodeLocation) , registerAssignment(RBM_NONE) , refType(refType) , multiRegIdx(0) , lastUse(false) , reload(false) , spillAfter(false) , singleDefSpill(false) , writeThru(false) , copyReg(false) , moveReg(false) , isPhysRegRef(false) , isFixedRegRef(false) , isLocalDefUse(false) , delayRegFree(false) , outOfOrder(false) #ifdef DEBUG , minRegCandidateCount(1) , rpNum(0) #endif { } Interval* getInterval() { assert(!isPhysRegRef); return (Interval*)referent; } void setInterval(Interval* i) { referent = i; isPhysRegRef = false; } RegRecord* getReg() { assert(isPhysRegRef); return (RegRecord*)referent; } void setReg(RegRecord* r) { referent = r; isPhysRegRef = true; registerAssignment = genRegMask(r->regNum); } regNumber assignedReg() { if (registerAssignment == RBM_NONE) { return REG_NA; } return genRegNumFromMask(registerAssignment); } // Returns true if it is a reference on a gentree node. bool IsActualRef() { switch (refType) { case RefTypeDef: case RefTypeUse: #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUpperVectorSave: case RefTypeUpperVectorRestore: #endif return true; // These must always be marked RegOptional. case RefTypeExpUse: case RefTypeParamDef: case RefTypeDummyDef: case RefTypeZeroInit: assert(RegOptional()); return false; default: return false; } } bool IsPhysRegRef() { return ((refType == RefTypeFixedReg) || (refType == RefTypeKill)); } void setRegOptional(bool val) { regOptional = val; } // Returns true whether this ref position is to be allocated // a reg only if it is profitable. bool RegOptional() { // TODO-CQ: Right now if a ref position is marked as // copyreg or movereg, then it is not treated as // 'allocate if profitable'. This is an implementation // limitation that needs to be addressed. return regOptional && !copyReg && !moveReg; } void setMultiRegIdx(unsigned idx) { multiRegIdx = idx; assert(multiRegIdx == idx); } unsigned getMultiRegIdx() { return multiRegIdx; } LsraLocation getRefEndLocation() { return delayRegFree ? nodeLocation + 1 : nodeLocation; } RefPosition* getRangeEndRef() { if (lastUse || nextRefPosition == nullptr || spillAfter) { return this; } // It would seem to make sense to only return 'nextRefPosition' if it is a lastUse, // and otherwise return `lastRefPosition', but that tends to excessively lengthen // the range for heuristic purposes. // TODO-CQ: Look into how this might be improved . 
return nextRefPosition; } LsraLocation getRangeEndLocation() { return getRangeEndRef()->getRefEndLocation(); } bool isIntervalRef() { return (!IsPhysRegRef() && (referent != nullptr)); } // isFixedRefOfRegMask indicates that the RefPosition has a fixed assignment to the register // specified by the given mask bool isFixedRefOfRegMask(regMaskTP regMask) { assert(genMaxOneBit(regMask)); return (registerAssignment == regMask); } // isFixedRefOfReg indicates that the RefPosition has a fixed assignment to the given register bool isFixedRefOfReg(regNumber regNum) { return (isFixedRefOfRegMask(genRegMask(regNum))); } #ifdef DEBUG // operator= copies everything except 'rpNum', which must remain unique RefPosition& operator=(const RefPosition& rp) { unsigned rpNumSave = rpNum; memcpy(this, &rp, sizeof(rp)); rpNum = rpNumSave; return *this; } void dump(LinearScan* linearScan); #endif // DEBUG }; #ifdef DEBUG void dumpRegMask(regMaskTP regs); #endif // DEBUG /*****************************************************************************/ #endif //_LSRA_H_ /*****************************************************************************/
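// A tiny standalone sketch (not JIT code) of the delayRegFree end-location rule from
// getRefEndLocation above: a delayed-free use keeps its register busy through the
// following location. SketchRef is a hypothetical stand-in for RefPosition.

#include <cstdio>

struct SketchRef
{
    unsigned location;
    bool     delayRegFree;

    unsigned endLocation() const
    {
        return delayRegFree ? location + 1 : location;
    }
};

int main()
{
    SketchRef plainUse{10, false};
    SketchRef delayedUse{10, true};
    printf("%u %u\n", plainUse.endLocation(), delayedUse.endLocation()); // 10 11
    return 0;
}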
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #ifndef _LSRA_H_ #define _LSRA_H_ #include "arraylist.h" #include "smallhash.h" // Minor and forward-reference types class Interval; class RefPosition; class LinearScan; class RegRecord; template <class T> class ArrayStack; // LsraLocation tracks the linearized order of the nodes. // Each node is assigned two LsraLocations - one for all the uses and all but the last // def, and a second location for the last def (if any) typedef unsigned int LsraLocation; const unsigned int MinLocation = 0; const unsigned int MaxLocation = UINT_MAX; // max number of registers an operation could require internally (in addition to uses and defs) const unsigned int MaxInternalRegisters = 8; const unsigned int RegisterTypeCount = 2; /***************************************************************************** * Register types *****************************************************************************/ typedef var_types RegisterType; #define IntRegisterType TYP_INT #define FloatRegisterType TYP_FLOAT //------------------------------------------------------------------------ // regType: Return the RegisterType to use for a given type // // Arguments: // type - the type of interest // template <class T> RegisterType regType(T type) { return varTypeUsesFloatReg(TypeGet(type)) ? FloatRegisterType : IntRegisterType; } //------------------------------------------------------------------------ // useFloatReg: Check if the given var_type should be allocated to a FloatRegisterType // inline bool useFloatReg(var_types type) { return (regType(type) == FloatRegisterType); } //------------------------------------------------------------------------ // registerTypesEquivalent: Check to see if two RegisterTypes are equivalent // inline bool registerTypesEquivalent(RegisterType a, RegisterType b) { return varTypeIsIntegralOrI(a) == varTypeIsIntegralOrI(b); } //------------------------------------------------------------------------ // calleeSaveRegs: Get the set of callee-save registers of the given RegisterType // inline regMaskTP calleeSaveRegs(RegisterType rt) { return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_SAVED : RBM_FLT_CALLEE_SAVED; } //------------------------------------------------------------------------ // callerSaveRegs: Get the set of caller-save registers of the given RegisterType // inline regMaskTP callerSaveRegs(RegisterType rt) { return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_TRASH : RBM_FLT_CALLEE_TRASH; } //------------------------------------------------------------------------ // RefInfo: Captures the necessary information for a definition that is "in-flight" // during `buildIntervals` (i.e. a tree-node definition has been encountered, // but not its use). This includes the RefPosition and its associated // GenTree node. // struct RefInfo { RefPosition* ref; GenTree* treeNode; RefInfo(RefPosition* r, GenTree* t) : ref(r), treeNode(t) { } // default constructor for data structures RefInfo() { } }; //------------------------------------------------------------------------ // RefInfoListNode: used to store a single `RefInfo` value for a // node during `buildIntervals`. // // This is the node type for `RefInfoList` below. 
// class RefInfoListNode final : public RefInfo { friend class RefInfoList; friend class RefInfoListNodePool; RefInfoListNode* m_next; // The next node in the list public: RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t) { } //------------------------------------------------------------------------ // RefInfoListNode::Next: Returns the next node in the list. RefInfoListNode* Next() const { return m_next; } }; //------------------------------------------------------------------------ // RefInfoList: used to store a list of `RefInfo` values for a // node during `buildIntervals`. // // This list of 'RefInfoListNode's contains the source nodes consumed by // a node, and is created by 'BuildNode'. // class RefInfoList final { friend class RefInfoListNodePool; RefInfoListNode* m_head; // The head of the list RefInfoListNode* m_tail; // The tail of the list public: RefInfoList() : m_head(nullptr), m_tail(nullptr) { } RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node) { assert(m_head->m_next == nullptr); } //------------------------------------------------------------------------ // RefInfoList::IsEmpty: Returns true if the list is empty. // bool IsEmpty() const { return m_head == nullptr; } //------------------------------------------------------------------------ // RefInfoList::Begin: Returns the first node in the list. // RefInfoListNode* Begin() const { return m_head; } //------------------------------------------------------------------------ // RefInfoList::End: Returns the position after the last node in the // list. The returned value is suitable for use as // a sentinel for iteration. // RefInfoListNode* End() const { return nullptr; } //------------------------------------------------------------------------ // RefInfoList::End: Returns the position after the last node in the // list. The returned value is suitable for use as // a sentinel for iteration. // RefInfoListNode* Last() const { return m_tail; } //------------------------------------------------------------------------ // RefInfoList::Append: Appends a node to the list. // // Arguments: // node - The node to append. Must not be part of an existing list. // void Append(RefInfoListNode* node) { assert(node->m_next == nullptr); if (m_tail == nullptr) { assert(m_head == nullptr); m_head = node; } else { m_tail->m_next = node; } m_tail = node; } //------------------------------------------------------------------------ // RefInfoList::Append: Appends another list to this list. // // Arguments: // other - The list to append. // void Append(RefInfoList other) { if (m_tail == nullptr) { assert(m_head == nullptr); m_head = other.m_head; } else { m_tail->m_next = other.m_head; } m_tail = other.m_tail; } //------------------------------------------------------------------------ // RefInfoList::Prepend: Prepends a node to the list. // // Arguments: // node - The node to prepend. Must not be part of an existing list. // void Prepend(RefInfoListNode* node) { assert(node->m_next == nullptr); if (m_head == nullptr) { assert(m_tail == nullptr); m_tail = node; } else { node->m_next = m_head; } m_head = node; } //------------------------------------------------------------------------ // RefInfoList::Add: Adds a node to the list. // // Arguments: // node - The node to add. Must not be part of an existing list. 
// prepend - True if it should be prepended (otherwise is appended) // void Add(RefInfoListNode* node, bool prepend) { if (prepend) { Prepend(node); } else { Append(node); } } //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfo for the given node // // Notes: // The BuildNode methods use this helper to retrieve the RefInfo for child nodes // from the useList being constructed. // RefInfoListNode* removeListNode(RefInfoListNode* listNode, RefInfoListNode* prevListNode) { RefInfoListNode* nextNode = listNode->Next(); if (prevListNode == nullptr) { m_head = nextNode; } else { prevListNode->m_next = nextNode; } if (nextNode == nullptr) { m_tail = prevListNode; } listNode->m_next = nullptr; return listNode; } // removeListNode - remove the RefInfoListNode for the given GenTree node from the defList RefInfoListNode* removeListNode(GenTree* node); // Same as above but takes a multiRegIdx to support multi-reg nodes. RefInfoListNode* removeListNode(GenTree* node, unsigned multiRegIdx); //------------------------------------------------------------------------ // GetRefPosition - retrieve the RefPosition for the given node // // Notes: // The Build methods use this helper to retrieve the RefPosition for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefPosition* GetRefPosition(GenTree* node) { for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if (listNode->treeNode == node) { return listNode->ref; } } assert(!"GetRefPosition didn't find the node"); unreached(); } //------------------------------------------------------------------------ // RefInfoList::GetSecond: Gets the second node in the list. // // Arguments: // (DEBUG ONLY) treeNode - The GenTree* we expect to be in the second node. // RefInfoListNode* GetSecond(INDEBUG(GenTree* treeNode)) { noway_assert((Begin() != nullptr) && (Begin()->Next() != nullptr)); RefInfoListNode* second = Begin()->Next(); assert(second->treeNode == treeNode); return second; } #ifdef DEBUG // Count - return the number of nodes in the list (DEBUG only) int Count() { int count = 0; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { count++; } return count; } #endif // DEBUG }; //------------------------------------------------------------------------ // RefInfoListNodePool: manages a pool of `RefInfoListNode` // values to decrease overall memory usage // during `buildIntervals`. // // `buildIntervals` involves creating a list of RefInfo items per // node that either directly produces a set of registers or that is a // contained node with register-producing sources. However, these lists // are short-lived: they are destroyed once the use of the corresponding // node is processed. As such, there is typically only a small number of // `RefInfoListNode` values in use at any given time. Pooling these // values avoids otherwise frequent allocations. 
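// A minimal free-list sketch (not JIT code) of the pooling strategy that the
// RefInfoListNodePool comment above describes: returned nodes are chained onto a
// free list and reused by the next request, avoiding per-node allocations. The
// names PoolNode and SketchPool are hypothetical stand-ins.

struct PoolNode
{
    int       payload;
    PoolNode* next;
};

struct SketchPool
{
    PoolNode* freeList = nullptr;

    PoolNode* getNode(int payload)
    {
        PoolNode* n = freeList;
        if (n != nullptr)
        {
            freeList = n->next; // reuse a returned node
        }
        else
        {
            n = new PoolNode(); // fall back to allocation
        }
        n->payload = payload;
        n->next    = nullptr;
        return n;
    }

    void returnNode(PoolNode* n)
    {
        n->next  = freeList; // push onto the free list for reuse
        freeList = n;
    }
};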
class RefInfoListNodePool final
{
    RefInfoListNode*      m_freeList;
    Compiler*             m_compiler;
    static const unsigned defaultPreallocation = 8;

public:
    RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation);
    RefInfoListNode* GetNode(RefPosition* r, GenTree* t);
    void ReturnNode(RefInfoListNode* listNode);
};

#if TRACK_LSRA_STATS
enum LsraStat
{
#define LSRA_STAT_DEF(enum_name, enum_str) enum_name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name,
#include "lsra_score.h"
#undef REG_SEL_DEF
    COUNT
};
#endif // TRACK_LSRA_STATS

struct LsraBlockInfo
{
    // bbNum of the predecessor to use for the register location of live-in variables.
    // 0 for fgFirstBB.
    unsigned int predBBNum;
    weight_t     weight;
    bool         hasCriticalInEdge : 1;
    bool         hasCriticalOutEdge : 1;
    bool         hasEHBoundaryIn : 1;
    bool         hasEHBoundaryOut : 1;
    bool         hasEHPred : 1;

#if TRACK_LSRA_STATS
    // Per block maintained LSRA statistics.
    unsigned stats[LsraStat::COUNT];
#endif // TRACK_LSRA_STATS
};

enum RegisterScore
{
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value,
#include "lsra_score.h"
#undef REG_SEL_DEF
    NONE = 0
};

// This is sort of a bit mask
// The low order 2 bits will be 1 for defs, and 2 for uses
enum RefType : unsigned char
{
#define DEF_REFTYPE(memberName, memberValue, shortName) memberName = memberValue,
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
};

// position in a block (for resolution)
enum BlockStartOrEnd
{
    BlockPositionStart = 0,
    BlockPositionEnd   = 1,
    PositionCount      = 2
};

inline bool RefTypeIsUse(RefType refType)
{
    return ((refType & RefTypeUse) == RefTypeUse);
}

inline bool RefTypeIsDef(RefType refType)
{
    return ((refType & RefTypeDef) == RefTypeDef);
}

typedef regNumberSmall* VarToRegMap;

typedef jitstd::list<Interval>                      IntervalList;
typedef jitstd::list<RefPosition>                   RefPositionList;
typedef jitstd::list<RefPosition>::iterator         RefPositionIterator;
typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator;

class Referenceable
{
public:
    Referenceable()
    {
        firstRefPosition  = nullptr;
        recentRefPosition = nullptr;
        lastRefPosition   = nullptr;
    }

    // A linked list of RefPositions. These are only traversed in the forward
    // direction, and are not moved, so they don't need to be doubly linked
    // (see RefPosition).

    RefPosition* firstRefPosition;
    RefPosition* recentRefPosition;
    RefPosition* lastRefPosition;

    // Get the position of the next reference which is at or greater than
    // the current location (relies upon recentRefPosition being updated
    // during traversal).
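// A standalone sketch (not JIT code) of the forward-cursor search just described:
// the scan resumes from the recent cursor instead of re-walking the list from its
// head, which is why the cursor must be updated during traversal. SketchRefPos is
// a hypothetical stand-in for RefPosition.

#include <cstdio>

struct SketchRefPos
{
    unsigned      location;
    SketchRefPos* next;
};

// Returns the first reference at or after 'loc', resuming from 'recent'.
static SketchRefPos* nextAtOrAfter(SketchRefPos* recent, unsigned loc)
{
    for (SketchRefPos* r = recent; r != nullptr; r = r->next)
    {
        if (r->location >= loc)
        {
            return r;
        }
    }
    return nullptr;
}

int main()
{
    SketchRefPos c{30, nullptr};
    SketchRefPos b{20, &c};
    SketchRefPos a{10, &b};
    SketchRefPos* recent = &b; // cursor already advanced past 'a' by a prior query
    printf("%u\n", nextAtOrAfter(recent, 25)->location); // 30
    return 0;
}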
RefPosition* getNextRefPosition(); LsraLocation getNextRefLocation(); }; class RegRecord : public Referenceable { public: RegRecord() { assignedInterval = nullptr; previousInterval = nullptr; regNum = REG_NA; isCalleeSave = false; registerType = IntRegisterType; } void init(regNumber reg) { #ifdef TARGET_ARM64 // The Zero register, or the SP if ((reg == REG_ZR) || (reg == REG_SP)) { // IsGeneralRegister returns false for REG_ZR and REG_SP regNum = reg; registerType = IntRegisterType; } else #endif if (emitter::isFloatReg(reg)) { registerType = FloatRegisterType; } else { // The constructor defaults to IntRegisterType assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType); } regNum = reg; isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0); } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); #endif // DEBUG // DATA // interval to which this register is currently allocated. // If the interval is inactive (isActive == false) then it is not currently live, // and the register can be unassigned (i.e. setting assignedInterval to nullptr) // without spilling the register. Interval* assignedInterval; // Interval to which this register was previously allocated, and which was unassigned // because it was inactive. This register will be reassigned to this Interval when // assignedInterval becomes inactive. Interval* previousInterval; regNumber regNum; bool isCalleeSave; RegisterType registerType; unsigned char regOrder; }; inline bool leafInRange(GenTree* leaf, int lower, int upper) { if (!leaf->IsIntCnsFitsInI32()) { return false; } if (leaf->AsIntCon()->gtIconVal < lower) { return false; } if (leaf->AsIntCon()->gtIconVal > upper) { return false; } return true; } inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple) { if (!leafInRange(leaf, lower, upper)) { return false; } if (leaf->AsIntCon()->gtIconVal % multiple) { return false; } return true; } inline bool leafAddInRange(GenTree* leaf, int lower, int upper, int multiple = 1) { if (leaf->OperGet() != GT_ADD) { return false; } return leafInRange(leaf->gtGetOp2(), lower, upper, multiple); } inline bool isCandidateVar(const LclVarDsc* varDsc) { return varDsc->lvLRACandidate; } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LinearScan XX XX XX XX This is the container for the Linear Scan data structures and methods. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // OPTION 1: The algorithm as described in "Optimized Interval Splitting in a // Linear Scan Register Allocator". It is driven by iterating over the Interval // lists. In this case, we need multiple IntervalLists, and Intervals will be // moved between them so they must be easily updated. // OPTION 2: The algorithm is driven by iterating over the RefPositions. In this // case, we only need a single IntervalList, and it won't be updated. // The RefPosition must refer to its Interval, and we need to be able to traverse // to the next RefPosition in code order // THIS IS THE OPTION CURRENTLY BEING PURSUED class LinearScan : public LinearScanInterface { friend class RefPosition; friend class Interval; friend class Lowering; public: // This could use further abstraction. From Compiler we need the tree, // the flowgraph and the allocator. 
LinearScan(Compiler* theCompiler); // This is the main driver virtual void doLinearScan(); static bool isSingleRegister(regMaskTP regMask) { return (genExactlyOneBit(regMask)); } // Initialize the block traversal for LSRA. // This resets the bbVisitedSet, and on the first invocation sets the blockSequence array, // which determines the order in which blocks will be allocated (currently called during Lowering). BasicBlock* startBlockSequence(); // Move to the next block in sequence, updating the current block information. BasicBlock* moveToNextBlock(); // Get the next block to be scheduled without changing the current block, // but updating the blockSequence during the first iteration if it is not fully computed. BasicBlock* getNextBlock(); // This is called during code generation to update the location of variables virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb); // This does the dataflow analysis and builds the intervals void buildIntervals(); // This is where the actual assignment is done void allocateRegisters(); // This is the resolution phase, where cross-block mismatches are fixed up void resolveRegisters(); void writeRegisters(RefPosition* currentRefPosition, GenTree* tree); // Insert a copy in the case where a tree node value must be moved to a different // register at the point of use, or it is reloaded to a different register // than the one it was spilled from void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void makeUpperVectorInterval(unsigned varIndex); Interval* getUpperVectorInterval(unsigned varIndex); // Save the upper half of a vector that lives in a callee-save register at the point of a call. void insertUpperVectorSave(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); // Restore the upper half of a vector that's been partially spilled prior to a use in 'tree'. void insertUpperVectorRestore(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE // resolve along one block-block edge enum ResolveType { ResolveSplit, ResolveJoin, ResolveCritical, ResolveSharedCritical, ResolveTypeCount }; #ifdef DEBUG static const char* resolveTypeName[ResolveTypeCount]; #endif enum WhereToInsert { InsertAtTop, InsertAtBottom }; #ifdef TARGET_ARM void addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, regNumberSmall* location, regNumber toReg, regNumber fromReg, ResolveType resolveType); #endif void addResolution( BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber outReg, regNumber inReg); void handleOutgoingCriticalEdges(BasicBlock* block); void resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet); void resolveEdges(); // Keep track of how many temp locations we'll need for spill void initMaxSpill(); void updateMaxSpill(RefPosition* refPosition); void recordMaxSpill(); // max simultaneous spill locations used of every type unsigned int maxSpill[TYP_COUNT]; unsigned int currentSpill[TYP_COUNT]; bool needFloatTmpForFPCall; bool needDoubleTmpForFPCall; #ifdef DEBUG private: //------------------------------------------------------------------------ // Should we stress lsra? This uses the COMPlus_JitStressRegs variable. // // The mask bits are currently divided into fields in which each non-zero value // is a distinct stress option (e.g. 
//   0x3 is not a combination of 0x1 and 0x2).
// However, subject to possible constraints (to be determined), the different
// fields can be combined (e.g. 0x7 is a combination of 0x3 and 0x4).
// Note that the field values are declared in a public enum, but the actual bits are
// only accessed via accessors.
unsigned lsraStressMask;

// This controls the registers available for allocation
enum LsraStressLimitRegs{LSRA_LIMIT_NONE      = 0,
                         LSRA_LIMIT_CALLEE    = 0x1,
                         LSRA_LIMIT_CALLER    = 0x2,
                         LSRA_LIMIT_SMALL_SET = 0x3,
                         LSRA_LIMIT_MASK      = 0x3};

// When LSRA_LIMIT_SMALL_SET is specified, it is desirable to select a "mixed" set of caller- and callee-save
// registers, so as to get different coverage than limiting to callee or caller.
// At least for x86 and AMD64, and potentially other architectures that will support SIMD,
// we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4.
// Hence the "SmallFPSet" has 5 elements.
CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
// On System V the RDI and RSI are not callee saved. Use R12 and R13 as callee saved registers.
static const regMaskTP LsraLimitSmallIntSet =
    (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13);
#else  // !UNIX_AMD64_ABI
// On Windows Amd64 use the RDI and RSI as callee saved registers.
static const regMaskTP LsraLimitSmallIntSet =
    (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI);
#endif // !UNIX_AMD64_ABI
static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#elif defined(TARGET_ARM)
// On ARM, we may need two registers to set up the target register for a virtual call, so we need
// to have at least the maximum number of arg registers, plus 2.
static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5);
static const regMaskTP LsraLimitSmallFPSet  = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17);
#elif defined(TARGET_ARM64)
static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20);
static const regMaskTP LsraLimitSmallFPSet  = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9);
#elif defined(TARGET_X86)
static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI);
static const regMaskTP LsraLimitSmallFPSet  = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#else
#error Unsupported or unset target architecture
#endif // target

LsraStressLimitRegs getStressLimitRegs()
{
    return (LsraStressLimitRegs)(lsraStressMask & LSRA_LIMIT_MASK);
}

regMaskTP getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstrain, unsigned minRegCount);
regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask);

// This controls the heuristics used to select registers
// These can be combined.
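// A standalone sketch (not JIT code) of the masked-field decoding used by the stress
// accessors here: each option lives in its own bit-field of the stress mask and is
// extracted with a per-field mask. The field layout mirrors the values shown in this
// header, but the example values are illustrative assumptions.

#include <cstdio>

enum : unsigned
{
    LIMIT_MASK    = 0x03,
    SELECT_MASK   = 0x1c,
    TRAVERSE_MASK = 0x60
};

int main()
{
    // e.g. LSRA_LIMIT_SMALL_SET (0x3) combined with LSRA_SELECT_NEAREST (0x10)
    unsigned stressMask = 0x3 | 0x10;
    printf("limit=0x%x select=0x%x traverse=0x%x\n",
           stressMask & LIMIT_MASK,     // 0x3
           stressMask & SELECT_MASK,    // 0x10
           stressMask & TRAVERSE_MASK); // 0x0
    return 0;
}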
enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04, LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c}; LsraSelect getSelectionHeuristics() { return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK); } bool doReverseSelect() { return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0); } bool doReverseCallerCallee() { return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0); } bool doSelectNearest() { return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0); } // This controls the order in which basic blocks are visited during allocation enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40, LSRA_TRAVERSE_RANDOM = 0x60, // NYI LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60}; LsraTraversalOrder getLsraTraversalOrder() { if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0) { return LSRA_TRAVERSE_DEFAULT; } return (LsraTraversalOrder)(lsraStressMask & LSRA_TRAVERSE_MASK); } bool isTraversalLayoutOrder() { return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT; } bool isTraversalPredFirstOrder() { return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST; } // This controls whether lifetimes should be extended to the entire method. // Note that this has no effect under MinOpts enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80}; LsraExtendLifetimes getLsraExtendLifeTimes() { return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK); } bool extendLifetimes() { return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES; } // This controls whether variables locations should be set to the previous block in layout order // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED - // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE). enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300}; LsraBlockBoundaryLocations getLsraBlockBoundaryLocations() { return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK); } regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs); // This controls whether we always insert a GT_RELOAD instruction after a spill // Note that this can be combined with LSRA_SPILL_ALWAYS (or not) enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400}; LsraReload getLsraReload() { return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK); } bool alwaysInsertReload() { return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD; } // This controls whether we spill everywhere enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800}; LsraSpill getLsraSpill() { return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK); } bool spillAlways() { return getLsraSpill() == LSRA_SPILL_ALWAYS; } // This controls whether RefPositions that lower/codegen indicated as reg optional be // allocated a reg at all. 
enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, LSRA_REG_OPTIONAL_MASK = 0x1000}; LsraRegOptionalControl getLsraRegOptionalControl() { return (LsraRegOptionalControl)(lsraStressMask & LSRA_REG_OPTIONAL_MASK); } bool regOptionalNoAlloc() { return getLsraRegOptionalControl() == LSRA_REG_OPTIONAL_NO_ALLOC; } bool candidatesAreStressLimited() { return ((lsraStressMask & (LSRA_LIMIT_MASK | LSRA_SELECT_MASK)) != 0); } // Dump support void dumpDefList(); void lsraDumpIntervals(const char* msg); void dumpRefPositions(const char* msg); void dumpVarRefPositions(const char* msg); // Checking code static bool IsLsraAdded(GenTree* node) { return ((node->gtDebugFlags & GTF_DEBUG_NODE_LSRA_ADDED) != 0); } static void SetLsraAdded(GenTree* node) { node->gtDebugFlags |= GTF_DEBUG_NODE_LSRA_ADDED; } static bool IsResolutionMove(GenTree* node); static bool IsResolutionNode(LIR::Range& containingRange, GenTree* node); void verifyFinalAllocation(); void verifyResolutionMove(GenTree* resolutionNode, LsraLocation currentLocation); #else // !DEBUG bool doSelectNearest() { return false; } bool extendLifetimes() { return false; } bool spillAlways() { return false; } // In a retail build we support only the default traversal order bool isTraversalLayoutOrder() { return false; } bool isTraversalPredFirstOrder() { return true; } bool getLsraExtendLifeTimes() { return false; } static void SetLsraAdded(GenTree* node) { // do nothing; checked only under #DEBUG } bool candidatesAreStressLimited() { return false; } #endif // !DEBUG public: // Used by Lowering when considering whether to split Longs, as well as by identifyCandidates(). bool isRegCandidate(LclVarDsc* varDsc); bool isContainableMemoryOp(GenTree* node); private: // Determine which locals are candidates for allocation void identifyCandidates(); // determine which locals are used in EH constructs we don't want to deal with void identifyCandidatesExceptionDataflow(); void buildPhysRegRecords(); #ifdef DEBUG void checkLastUses(BasicBlock* block); int ComputeOperandDstCount(GenTree* operand); int ComputeAvailableSrcCount(GenTree* node); #endif // DEBUG void setFrameType(); // Update allocations at start/end of block void unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap); void processBlockEndAllocation(BasicBlock* current); // Record variable locations at start/end of block void processBlockStartLocations(BasicBlock* current); void processBlockEndLocations(BasicBlock* current); #ifdef TARGET_ARM bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); regNumber findAnotherHalfRegNum(regNumber regNum); bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); void unassignDoublePhysReg(RegRecord* doubleRegRecord); #endif void updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType); void updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType); bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); bool isAssignedToInterval(Interval* interval, RegRecord* regRec); bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); weight_t getSpillWeight(RegRecord* physRegRecord); // insert refpositions representing prolog zero-inits which will be added later void insertZeroInitRefPositions(); // add physreg refpositions 
for a tree node, based on calling convention and instruction selection predictions void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse); void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition); void buildRefPositionsForNode(GenTree* tree, LsraLocation loc); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet); void buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node, bool isUse); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(UNIX_AMD64_ABI) // For AMD64 on SystemV machines. This method // is called as replacement for raUpdateRegStateForArg // that is used on Windows. On System V systems a struct can be passed // partially using registers from the 2 register files. void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc); #endif // defined(UNIX_AMD64_ABI) // Update reg state for an incoming register argument void updateRegStateForArg(LclVarDsc* argDsc); inline bool isCandidateLocalRef(GenTree* tree) { if (tree->IsLocal()) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon()); return isCandidateVar(varDsc); } return false; } // Helpers for getKillSetForNode(). regMaskTP getKillSetForStoreInd(GenTreeStoreInd* tree); regMaskTP getKillSetForShiftRotate(GenTreeOp* tree); regMaskTP getKillSetForMul(GenTreeOp* tree); regMaskTP getKillSetForCall(GenTreeCall* call); regMaskTP getKillSetForModDiv(GenTreeOp* tree); regMaskTP getKillSetForBlockStore(GenTreeBlk* blkNode); regMaskTP getKillSetForReturn(); regMaskTP getKillSetForProfilerHook(); #ifdef FEATURE_HW_INTRINSICS regMaskTP getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS // Return the registers killed by the given tree node. // This is used only for an assert, and for stress, so it is only defined under DEBUG. // Otherwise, the Build methods should obtain the killMask from the appropriate method above. #ifdef DEBUG regMaskTP getKillSetForNode(GenTree* tree); #endif // Given some tree node add refpositions for all the registers this node kills bool buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask); regMaskTP allRegs(RegisterType rt); regMaskTP allByteRegs(); regMaskTP allSIMDRegs(); regMaskTP internalFloatRegCandidates(); void makeRegisterInactive(RegRecord* physRegRecord); void freeRegister(RegRecord* physRegRecord); void freeRegisters(regMaskTP regsToFree); // Get the type that this tree defines. var_types getDefType(GenTree* tree) { var_types type = tree->TypeGet(); if (type == TYP_STRUCT) { assert(tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR)); GenTreeLclVar* lclVar = tree->AsLclVar(); LclVarDsc* varDsc = compiler->lvaGetDesc(lclVar); type = varDsc->GetRegisterType(lclVar); } assert(type != TYP_UNDEF && type != TYP_STRUCT); return type; } // Managing internal registers during the BuildNode process. 
RefPosition* defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP candidates); RefPosition* buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); RefPosition* buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); void buildInternalRegisterUses(); void writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg); void resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition); void insertMove(BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber inReg, regNumber outReg); void insertSwap( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2); private: Interval* newInterval(RegisterType regType); Interval* getIntervalForLocalVar(unsigned varIndex) { assert(varIndex < compiler->lvaTrackedCount); assert(localVarIntervals[varIndex] != nullptr); return localVarIntervals[varIndex]; } Interval* getIntervalForLocalVarNode(GenTreeLclVarCommon* tree) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree); assert(varDsc->lvTracked); return getIntervalForLocalVar(varDsc->lvVarIndex); } RegRecord* getRegisterRecord(regNumber regNum); RefPosition* newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType); RefPosition* newRefPosition(Interval* theInterval, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); // This creates a RefTypeUse at currentLoc. It sets the treeNode to nullptr if it is not a // lclVar interval. RefPosition* newUseRefPosition(Interval* theInterval, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); RefPosition* newRefPosition( regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask); void applyCalleeSaveHeuristics(RefPosition* rp); void checkConflictingDefUse(RefPosition* rp); void associateRefPosWithInterval(RefPosition* rp); weight_t getWeight(RefPosition* refPos); /***************************************************************************** * Register management ****************************************************************************/ RegisterType getRegisterType(Interval* currentInterval, RefPosition* refPosition); #ifdef DEBUG const char* getScoreName(RegisterScore score); #endif regNumber allocateReg(Interval* current, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); regNumber assignCopyReg(RefPosition* refPosition); bool isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition); bool isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord); void checkAndAssignInterval(RegRecord* regRec, Interval* interval); void assignPhysReg(RegRecord* regRec, Interval* interval); void assignPhysReg(regNumber reg, Interval* interval) { assignPhysReg(getRegisterRecord(reg), interval); } bool isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysRegNoSpill(RegRecord* reg); void unassignPhysReg(regNumber reg) { unassignPhysReg(getRegisterRecord(reg), nullptr); } void setIntervalAsSpilled(Interval* interval); void setIntervalAsSplit(Interval* interval); void spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* 
toRefPosition)); void spillGCRefs(RefPosition* killRefPosition); /***************************************************************************** * Register selection ****************************************************************************/ regMaskTP getFreeCandidates(regMaskTP candidates, var_types regType) { regMaskTP result = candidates & m_AvailableRegs; #ifdef TARGET_ARM // For TYP_DOUBLE on ARM, we can only use a register for which the odd half is // also available. if (regType == TYP_DOUBLE) { result &= (m_AvailableRegs >> 1); } #endif // TARGET_ARM return result; } #ifdef DEBUG class RegisterSelection; // For lsra ordering experimentation typedef void (LinearScan::RegisterSelection::*HeuristicFn)(); typedef JitHashTable<RegisterScore, JitSmallPrimitiveKeyFuncs<RegisterScore>, HeuristicFn> ScoreMappingTable; #define REGSELECT_HEURISTIC_COUNT 17 #endif class RegisterSelection { public: RegisterSelection(LinearScan* linearScan); // Perform register selection and update currentInterval or refPosition FORCEINLINE regMaskTP select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); // If the register is from the unassigned set such that it was not already // assigned to the current interval FORCEINLINE bool foundUnassignedReg() { assert(found && isSingleRegister(foundRegBit)); bool isUnassignedReg = ((foundRegBit & unassignedSet) != RBM_NONE); return isUnassignedReg && !isAlreadyAssigned(); } // Did the register selector decide to spill this interval FORCEINLINE bool isSpilling() { return (foundRegBit & freeCandidates) == RBM_NONE; } // Is the value one of the constants that is already in a register FORCEINLINE bool isMatchingConstant() { assert(found && isSingleRegister(foundRegBit)); return (matchingConstants & foundRegBit) != RBM_NONE; } // Did we apply CONST_AVAILABLE heuristics FORCEINLINE bool isConstAvailable() { return (score & CONST_AVAILABLE) != 0; } private: #ifdef DEBUG RegisterScore RegSelectionOrder[REGSELECT_HEURISTIC_COUNT] = {NONE}; ScoreMappingTable* mappingTable = nullptr; #endif LinearScan* linearScan = nullptr; int score = 0; Interval* currentInterval = nullptr; RefPosition* refPosition = nullptr; RegisterType regType = RegisterType::TYP_UNKNOWN; LsraLocation currentLocation = MinLocation; RefPosition* nextRefPos = nullptr; regMaskTP candidates; regMaskTP preferences = RBM_NONE; Interval* relatedInterval = nullptr; regMaskTP relatedPreferences = RBM_NONE; LsraLocation rangeEndLocation; LsraLocation relatedLastLocation; bool preferCalleeSave = false; RefPosition* rangeEndRefPosition; RefPosition* lastRefPosition; regMaskTP callerCalleePrefs = RBM_NONE; LsraLocation lastLocation; RegRecord* prevRegRec = nullptr; regMaskTP prevRegBit = RBM_NONE; // These are used in the post-selection updates, and must be set for any selection. regMaskTP freeCandidates; regMaskTP matchingConstants; regMaskTP unassignedSet; regMaskTP foundRegBit; // Compute the sets for COVERS, OWN_PREFERENCE, COVERS_RELATED, COVERS_FULL and UNASSIGNED together, // as they all require similar computation.
regMaskTP coversSet; regMaskTP preferenceSet; regMaskTP coversRelatedSet; regMaskTP coversFullSet; bool coversSetsCalculated = false; bool found = false; bool skipAllocation = false; regNumber foundReg = REG_NA; // If the selected register is already assigned to the current interval FORCEINLINE bool isAlreadyAssigned() { assert(found && isSingleRegister(candidates)); return (prevRegBit & preferences) == foundRegBit; } bool applySelection(int selectionScore, regMaskTP selectionCandidates); bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); FORCEINLINE void calculateCoversSets(); FORCEINLINE void reset(Interval* interval, RefPosition* refPosition); #define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); #include "lsra_score.h" #undef REG_SEL_DEF }; RegisterSelection* regSelector; /***************************************************************************** * For Resolution phase ****************************************************************************/ // TODO-Throughput: Consider refactoring this so that we keep a map from regs to vars for better scaling unsigned int regMapCount; // When we split edges, we create new blocks, and instead of expanding the VarToRegMaps, we // rely on the property that the "in" map is the same as the "from" block of the edge, and the // "out" map is the same as the "to" block of the edge (by construction). // So, for any block whose bbNum is greater than bbNumMaxBeforeResolution, we use the // splitBBNumToTargetBBNumMap. // TODO-Throughput: We may want to look into the cost/benefit tradeoff of doing this vs. expanding // the arrays. unsigned bbNumMaxBeforeResolution; struct SplitEdgeInfo { unsigned fromBBNum; unsigned toBBNum; }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo> SplitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() { if (splitBBNumToTargetBBNumMap == nullptr) { splitBBNumToTargetBBNumMap = new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler)); } return splitBBNumToTargetBBNumMap; } SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum); void initVarRegMaps(); void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); VarToRegMap getInVarToRegMap(unsigned int bbNum); VarToRegMap getOutVarToRegMap(unsigned int bbNum); void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of // the block) VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap); regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type); #ifdef DEBUG void dumpVarToRegMap(VarToRegMap map); void dumpInVarToRegMap(BasicBlock* block); void dumpOutVarToRegMap(BasicBlock* block); // There are three points at which a tuple-style dump is produced, and each // differs slightly: // - In LSRA_DUMP_PRE, it does a simple dump of each node, with indications of what // tree nodes are consumed.
// - In LSRA_DUMP_REFPOS, which is after the intervals are built, but before // register allocation, each node is dumped, along with all of the RefPositions. // The Intervals are identified as Lnnn for lclVar intervals, Innn for other // intervals, and Tnnn for internal temps. // - In LSRA_DUMP_POST, which is after register allocation, the registers are // shown. enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST}; void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength); void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest); void DumpOperandDefs( GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength); void TupleStyleDump(LsraTupleDumpMode mode); LsraLocation maxNodeLocation; // Width of various fields - used to create a streamlined dump during allocation that shows the // state of all the registers in columns. int regColumnWidth; int regTableIndent; const char* columnSeparator; const char* line; const char* leftBox; const char* middleBox; const char* rightBox; static const int MAX_FORMAT_CHARS = 12; char intervalNameFormat[MAX_FORMAT_CHARS]; char regNameFormat[MAX_FORMAT_CHARS]; char shortRefPositionFormat[MAX_FORMAT_CHARS]; char emptyRefPositionFormat[MAX_FORMAT_CHARS]; char indentFormat[MAX_FORMAT_CHARS]; static const int MAX_LEGEND_FORMAT_CHARS = 25; char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS]; char legendFormat[MAX_LEGEND_FORMAT_CHARS]; // How many rows have we printed since last printing a "title row"? static const int MAX_ROWS_BETWEEN_TITLES = 50; int rowCountSinceLastTitle; // Current mask of registers being printed in the dump. regMaskTP lastDumpedRegisters; regMaskTP registersToDump; int lastUsedRegNumIndex; bool shouldDumpReg(regNumber regNum) { return (registersToDump & genRegMask(regNum)) != 0; } void dumpRegRecordHeader(); void dumpRegRecordTitle(); void dumpRegRecordTitleIfNeeded(); void dumpRegRecordTitleLines(); void dumpRegRecords(); void dumpNewBlock(BasicBlock* currentBlock, LsraLocation location); // An abbreviated RefPosition dump for printing with column-based register state void dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock); // Print the number of spaces occupied by a dumpRefPositionShort() void dumpEmptyRefPosition(); // A dump of Referent, in exactly regColumnWidth characters void dumpIntervalName(Interval* interval); // Events during the allocation phase that cause some dump output enum LsraDumpEvent{ // Conflicting def/use LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2, LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6, // Spilling LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS, LSRA_EVENT_NO_GC_KILLS, // Block boundaries LSRA_EVENT_START_BB, LSRA_EVENT_END_BB, // Miscellaneous LSRA_EVENT_FREE_REGS, LSRA_EVENT_UPPER_VECTOR_SAVE, LSRA_EVENT_UPPER_VECTOR_RESTORE, // Characteristics of the current RefPosition LSRA_EVENT_INCREMENT_RANGE_END, // ???
LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG, // Allocation decisions LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG, LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG, LSRA_EVENT_REUSE_REG, }; void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, regNumber reg = REG_NA, BasicBlock* currentBlock = nullptr, RegisterScore registerScore = NONE); void validateIntervals(); #endif // DEBUG #if TRACK_LSRA_STATS unsigned regCandidateVarCount; void updateLsraStat(LsraStat stat, unsigned currentBBNum); void dumpLsraStats(FILE* file); LsraStat getLsraStatFromScore(RegisterScore registerScore); LsraStat firstRegSelStat = STAT_FREE; public: virtual void dumpLsraStatsCsv(FILE* file); virtual void dumpLsraStatsSummary(FILE* file); static const char* getStatName(unsigned stat); #define INTRACK_STATS(x) x #define INTRACK_STATS_IF(condition, work) \ if (condition) \ { \ work; \ } #else // !TRACK_LSRA_STATS #define INTRACK_STATS(x) #define INTRACK_STATS_IF(condition, work) #endif // !TRACK_LSRA_STATS private: Compiler* compiler; CompAllocator getAllocator(Compiler* comp) { return comp->getAllocator(CMK_LSRA); } #ifdef DEBUG // This is used for dumping RefPosition* activeRefPosition; #endif // DEBUG IntervalList intervals; RegRecord physRegs[REG_COUNT]; // Map from tracked variable index to Interval*. Interval** localVarIntervals; // Set of blocks that have been visited. BlockSet bbVisitedSet; void markBlockVisited(BasicBlock* block) { BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum); } void clearVisitedBlocks() { BlockSetOps::ClearD(compiler, bbVisitedSet); } bool isBlockVisited(BasicBlock* block) { return BlockSetOps::IsMember(compiler, bbVisitedSet, block->bbNum); } #if DOUBLE_ALIGN bool doDoubleAlign; #endif // A map from bbNum to the block information used during register allocation. LsraBlockInfo* blockInfo; BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)); // The order in which the blocks will be allocated. // This is an array of BasicBlock*, in the order in which they should be traversed. BasicBlock** blockSequence; // The verifiedAllBBs flag indicates whether we have verified that all BBs have been // included in the blockSequence above, during setBlockSequence(). bool verifiedAllBBs; void setBlockSequence(); int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); BasicBlockList* blockSequenceWorkList; bool blockSequencingDone; #ifdef DEBUG // LSRA must not change the number of blocks and blockEpoch that it initializes at start. unsigned blockEpoch; #endif // DEBUG void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); BasicBlock* getNextCandidateFromWorkList(); // Indicates whether the allocation pass has been completed. bool allocationPassComplete; // The bbNum of the block being currently allocated or resolved. unsigned int curBBNum; // The current location LsraLocation currentLoc; // The first location in a cold or funclet block. LsraLocation firstColdLoc; // The ordinal of the block we're on (i.e. this is the curBBSeqNum-th block we've allocated). unsigned int curBBSeqNum; // The number of blocks that we've sequenced.
unsigned int bbSeqCount; // The Location of the start of the current block. LsraLocation curBBStartLocation; // True if the method contains any critical edges. bool hasCriticalEdges; // True if there are any register candidate lclVars available for allocation. bool enregisterLocalVars; virtual bool willEnregisterLocalVars() const { return enregisterLocalVars; } // Ordered list of RefPositions RefPositionList refPositions; // Per-block variable location mappings: an array indexed by block number that yields a // pointer to an array of regNumber, one per variable. VarToRegMap* inVarToRegMaps; VarToRegMap* outVarToRegMaps; // A temporary VarToRegMap used during the resolution of critical edges. VarToRegMap sharedCriticalVarToRegMap; PhasedVar<regMaskTP> availableIntRegs; PhasedVar<regMaskTP> availableFloatRegs; PhasedVar<regMaskTP> availableDoubleRegs; // The set of all register candidates. Note that this may be a subset of tracked vars. VARSET_TP registerCandidateVars; // Current set of live register candidate vars, used during building of RefPositions to determine // whether to preference to callee-save. VARSET_TP currentLiveVars; // Set of variables that may require resolution across an edge. // This is first constructed during interval building, to contain all the lclVars that are live at BB edges. // Then, any lclVar that is always in the same register is removed from the set. VARSET_TP resolutionCandidateVars; // This set contains all the lclVars that are ever spilled or split. VARSET_TP splitOrSpilledVars; // Set of floating point variables to consider for callee-save registers. VARSET_TP fpCalleeSaveCandidateVars; // Set of variables exposed on EH flow edges. VARSET_TP exceptVars; // Set of variables exposed on finally edges. These must be zero-init if they are refs or if compInitMem is true. VARSET_TP finallyVars; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static const var_types LargeVectorSaveType = TYP_SIMD16; #elif defined(TARGET_ARM64) static const var_types LargeVectorSaveType = TYP_DOUBLE; #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. VARSET_TP largeVectorVars; // Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers. 
VARSET_TP largeVectorCalleeSaveCandidateVars; #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE //----------------------------------------------------------------------- // Register status //----------------------------------------------------------------------- regMaskTP m_AvailableRegs; regNumber getRegForType(regNumber reg, var_types regType) { #ifdef TARGET_ARM if ((regType == TYP_DOUBLE) && !genIsValidDoubleReg(reg)) { reg = REG_PREV(reg); } #endif // TARGET_ARM return reg; } regMaskTP getRegMask(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = genRegMask(reg); #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regMask |= (regMask << 1); } #endif // TARGET_ARM return regMask; } void resetAvailableRegs() { m_AvailableRegs = (availableIntRegs | availableFloatRegs); m_RegistersWithConstants = RBM_NONE; } bool isRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (m_AvailableRegs & regMask) == regMask; } void setRegsInUse(regMaskTP regMask) { m_AvailableRegs &= ~regMask; } void setRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); setRegsInUse(regMask); } void makeRegsAvailable(regMaskTP regMask) { m_AvailableRegs |= regMask; } void makeRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); makeRegsAvailable(regMask); } void clearNextIntervalRef(regNumber reg, var_types regType); void updateNextIntervalRef(regNumber reg, Interval* interval); void clearSpillCost(regNumber reg, var_types regType); void updateSpillCost(regNumber reg, Interval* interval); regMaskTP m_RegistersWithConstants; void clearConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants &= ~getRegMask(reg, regType); } void setConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants |= getRegMask(reg, regType); } bool isRegConstant(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = getRegMask(reg, regType); return (m_RegistersWithConstants & regMask) == regMask; } regMaskTP getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition); regMaskTP fixedRegs; LsraLocation nextFixedRef[REG_COUNT]; void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); LsraLocation getNextFixedRef(regNumber regNum, var_types regType) { LsraLocation loc = nextFixedRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextFixedRef[regNum + 1]); } #endif return loc; } LsraLocation nextIntervalRef[REG_COUNT]; LsraLocation getNextIntervalRef(regNumber regNum, var_types regType) { LsraLocation loc = nextIntervalRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextIntervalRef[regNum + 1]); } #endif return loc; } weight_t spillCost[REG_COUNT]; regMaskTP regsBusyUntilKill; regMaskTP regsInUseThisLocation; regMaskTP regsInUseNextLocation; bool isRegBusy(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsBusyUntilKill & regMask) != RBM_NONE; } void setRegBusyUntilKill(regNumber reg, var_types regType) { regsBusyUntilKill |= getRegMask(reg, regType); } void clearRegBusyUntilKill(regNumber reg) { regsBusyUntilKill &= ~genRegMask(reg); } bool isRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsInUseThisLocation & regMask) != RBM_NONE; } void resetRegState() { resetAvailableRegs(); regsBusyUntilKill = 
RBM_NONE; } bool conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition); // This method should not be used and is here to retain old behavior. // It should be replaced by isRegAvailable(). // See comment in allocateReg(); bool isFree(RegRecord* regRecord); //----------------------------------------------------------------------- // Build methods //----------------------------------------------------------------------- // The listNodePool is used to maintain the RefInfo for nodes that are "in flight" // i.e. whose consuming node has not yet been handled. RefInfoListNodePool listNodePool; // When Def RefPositions are built for a node, their RefInfoListNode // (GenTree* to RefPosition* mapping) is placed in the defList. // As the consuming node is handled, it removes the RefInfoListNode from the // defList, uses the interval associated with the corresponding Def RefPosition and // uses it to build the Use RefPosition. RefInfoList defList; // As we build uses, we may want to preference the next definition (i.e. the register produced // by the current node) to the same register as one of its uses. This is done by setting // 'tgtPrefUse' to that RefPosition. RefPosition* tgtPrefUse = nullptr; RefPosition* tgtPrefUse2 = nullptr; // The following keep track of information about internal (temporary register) intervals // during the building of a single node. static const int MaxInternalCount = 5; RefPosition* internalDefs[MaxInternalCount]; int internalCount = 0; bool setInternalRegsDelayFree; // When a RefTypeUse is marked as 'delayRegFree', we also want to mark the RefTypeDef // in the next Location as 'hasInterferingUses'. This is accomplished by setting this // 'pendingDelayFree' to true as they are created, and clearing it as a new node is // handled in 'BuildNode'. bool pendingDelayFree; // This method clears the "build state" before starting to handle a new node. void clearBuildState() { tgtPrefUse = nullptr; tgtPrefUse2 = nullptr; internalCount = 0; setInternalRegsDelayFree = false; pendingDelayFree = false; } bool isCandidateMultiRegLclVar(GenTreeLclVar* lclNode); bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); void setDelayFree(RefPosition* use); int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); #ifdef TARGET_XARCH int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); #endif // TARGET_XARCH // This is the main entry point for building the RefPositions for a node. // These methods return the number of sources.
int BuildNode(GenTree* tree); void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); int BuildSimple(GenTree* tree); int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); int BuildDelayFreeUses(GenTree* node, GenTree* rmwNode = nullptr, regMaskTP candidates = RBM_NONE); int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH // This method, unlike the others, returns the number of sources, since it may be called when // 'tree' is contained. int BuildShiftRotate(GenTree* tree); #endif // TARGET_XARCH #ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif int BuildPutArgReg(GenTreeUnOp* node); int BuildCall(GenTreeCall* call); int BuildCmp(GenTree* tree); int BuildBlockStore(GenTreeBlk* blkNode); int BuildModDiv(GenTree* tree); int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); int BuildStoreLoc(GenTreeLclVarCommon* tree); int BuildIndir(GenTreeIndir* indirTree); int BuildGCWriteBarrier(GenTree* tree); int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) #if defined(TARGET_X86) // Move the last use bit, if any, from 'fromTree' to 'toTree'; 'fromTree' must be contained. void CheckAndMoveRMWLastUse(GenTree* fromTree, GenTree* toTree) { // If 'fromTree' is not a last-use lclVar, there's nothing to do. if ((fromTree == nullptr) || !fromTree->OperIs(GT_LCL_VAR) || ((fromTree->gtFlags & GTF_VAR_DEATH) == 0)) { return; } // If 'fromTree' was a lclVar, it must be contained and 'toTree' must match. if (!fromTree->isContained() || (toTree == nullptr) || !toTree->OperIs(GT_LCL_VAR) || (fromTree->AsLclVarCommon()->GetLclNum() != toTree->AsLclVarCommon()->GetLclNum())) { assert(!"Unmatched RMW indirections"); return; } // This is probably not necessary, but keeps things consistent. fromTree->gtFlags &= ~GTF_VAR_DEATH; toTree->gtFlags |= GTF_VAR_DEATH; } #endif // TARGET_X86 #ifdef FEATURE_SIMD int BuildSIMD(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #endif // FEATURE_HW_INTRINSICS int BuildPutArgStk(GenTreePutArgStk* argNode); #if FEATURE_ARG_SPLIT int BuildPutArgSplit(GenTreePutArgSplit* tree); #endif // FEATURE_ARG_SPLIT int BuildLclHeap(GenTree* tree); }; /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval XX XX XX XX This is the fundamental data structure for linear scan register XX XX allocation. It represents the live range(s) for a variable or temp. 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ class Interval : public Referenceable { public: Interval(RegisterType registerType, regMaskTP registerPreferences) : registerPreferences(registerPreferences) , relatedInterval(nullptr) , assignedReg(nullptr) , varNum(0) , physReg(REG_COUNT) , registerType(registerType) , isActive(false) , isLocalVar(false) , isSplit(false) , isSpilled(false) , isInternal(false) , isStructField(false) , isPromotedStruct(false) , hasConflictingDefUse(false) , hasInterferingUses(false) , isSpecialPutArg(false) , preferCalleeSave(false) , isConstant(false) #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE , isUpperVector(false) , isPartiallySpilled(false) #endif , isWriteThru(false) , isSingleDef(false) #ifdef DEBUG , intervalIndex(0) #endif { } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); // extremely concise representation void microDump(); #endif // DEBUG void setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l); // Fixed registers for which this Interval has a preference regMaskTP registerPreferences; // The relatedInterval is: // - for any other interval, it is the interval to which this interval // is currently preferenced (e.g. because they are related by a copy) Interval* relatedInterval; // The assignedReg is the RegRecord for the register to which this interval // has been assigned at some point - if the interval is active, this is the // register it currently occupies. RegRecord* assignedReg; unsigned int varNum; // This is the "variable number": the index into the lvaTable array // The register to which it is currently assigned. regNumber physReg; RegisterType registerType; // Is this Interval currently in a register and live? bool isActive; bool isLocalVar : 1; // Indicates whether this interval has been assigned to different registers bool isSplit : 1; // Indicates whether this interval is ever spilled bool isSpilled : 1; // indicates an interval representing the internal requirements for // generating code for a node (temp registers internal to the node) // Note that this interval may live beyond a node in the GT_ARR_LENREF/GT_IND // case (though never lives beyond a stmt) bool isInternal : 1; // true if this is a LocalVar for a struct field bool isStructField : 1; // true iff this is a GT_LDOBJ for a fully promoted (PROMOTION_TYPE_INDEPENDENT) struct bool isPromotedStruct : 1; // true if this is an SDSU interval for which the def and use have conflicting register // requirements bool hasConflictingDefUse : 1; // true if this interval's defining node has "delayRegFree" uses, either due to it being an RMW instruction, // OR because it requires an internal register that differs from the target. bool hasInterferingUses : 1; // True if this interval is defined by a putArg, whose source is a non-last-use lclVar. // During allocation, this flag will be cleared if the source is not already in the required register. // Otherwise, we will leave the register allocated to the lclVar, but mark the RegRecord as // isBusyUntilKill, so that it won't be reused if the lclVar goes dead before the call. bool isSpecialPutArg : 1; // True if this interval interferes with a call. bool preferCalleeSave : 1; // True if this interval is defined by a constant node that may be reused and/or may be // able to reuse a constant that's already in a register.
bool isConstant : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // True if this is a special interval for saving the upper half of a large vector. bool isUpperVector : 1; // This is a convenience method to avoid ifdef's everywhere this is used. bool IsUpperVector() const { return isUpperVector; } // True if this interval has been partially spilled bool isPartiallySpilled : 1; #else bool IsUpperVector() const { return false; } #endif // True if this interval is associated with a lclVar that is written to memory at each definition. bool isWriteThru : 1; // True if this interval has a single definition. bool isSingleDef : 1; #ifdef DEBUG unsigned int intervalIndex; #endif // DEBUG LclVarDsc* getLocalVar(Compiler* comp) { assert(isLocalVar); return comp->lvaGetDesc(this->varNum); } // Get the local tracked variable "index" (lvVarIndex), used in bitmasks. unsigned getVarIndex(Compiler* comp) { LclVarDsc* varDsc = getLocalVar(comp); assert(varDsc->lvTracked); // If this isn't true, we shouldn't be calling this function! return varDsc->lvVarIndex; } bool isAssignedTo(regNumber regNum) { // This uses regMasks to handle the case where a double actually occupies two registers // TODO-Throughput: This could/should be done more cheaply. return (physReg != REG_NA && (genRegMask(physReg, registerType) & genRegMask(regNum)) != RBM_NONE); } // Assign the related interval. void assignRelatedInterval(Interval* newRelatedInterval) { #ifdef DEBUG if (VERBOSE) { printf("Assigning related "); newRelatedInterval->microDump(); printf(" to "); this->microDump(); printf("\n"); } #endif // DEBUG relatedInterval = newRelatedInterval; } // Assign the related interval, but only if it isn't already assigned. bool assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval) { if (relatedInterval == nullptr) { assignRelatedInterval(newRelatedInterval); return true; } else { #ifdef DEBUG if (VERBOSE) { printf("Interval "); this->microDump(); printf(" already has a related interval\n"); } #endif // DEBUG return false; } } // Get the current preferences for this Interval. // Note that when we have an assigned register we don't necessarily update the // registerPreferences to that register, as there may be multiple, possibly disjoint, // definitions. This method will return the current assigned register if any, or // the 'registerPreferences' otherwise. // regMaskTP getCurrentPreferences() { return (assignedReg == nullptr) ? registerPreferences : genRegMask(assignedReg->regNum); } void mergeRegisterPreferences(regMaskTP preferences) { // We require registerPreferences to have been initialized. assert(registerPreferences != RBM_NONE); // It is invalid to update with empty preferences assert(preferences != RBM_NONE); regMaskTP commonPreferences = (registerPreferences & preferences); if (commonPreferences != RBM_NONE) { registerPreferences = commonPreferences; return; } // There are no preferences in common. // Preferences need to reflect both cases where a var must occupy a specific register, // as well as cases where a var is live when a register is killed. // In the former case, we would like to record all such registers, however we don't // really want to use any registers that will interfere. // To approximate this, we never "or" together multi-reg sets, which are generally kill sets. if (!genMaxOneBit(preferences)) { // The new preference value is a multi-reg set, so it's probably a kill. // Keep the new value. 
registerPreferences = preferences; return; } if (!genMaxOneBit(registerPreferences)) { // The old preference value is a multi-reg set. // Keep the existing preference set, as it probably reflects one or more kills. // It may have been a union of multiple individual registers, but we can't // distinguish that case without extra cost. return; } // If we reach here, we have two disjoint single-reg sets. // Keep only the callee-save preferences, if not empty. // Otherwise, take the union of the preferences. regMaskTP newPreferences = registerPreferences | preferences; if (preferCalleeSave) { regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences)); if (calleeSaveMask != RBM_NONE) { newPreferences = calleeSaveMask; } } registerPreferences = newPreferences; } // Update the registerPreferences on the interval. // If there are conflicting requirements on this interval, set the preferences to // the union of them. That way maybe we'll get at least one of them. // An exception is made in the case where one of the existing or new // preferences is all callee-save, in which case we "prefer" the callee-save void updateRegisterPreferences(regMaskTP preferences) { // If this interval is preferenced, that interval may have already been assigned a // register, and we want to include that in the preferences. if ((relatedInterval != nullptr) && !relatedInterval->isActive) { mergeRegisterPreferences(relatedInterval->getCurrentPreferences()); } // Now merge the new preferences. mergeRegisterPreferences(preferences); } }; class RefPosition { public: // A RefPosition refers to either an Interval or a RegRecord. 'referent' points to one // of these types. If it refers to a RegRecord, then 'isPhysRegRef()' is true. If it // refers to an Interval, then 'isPhysRegRef()' is false. // referent can never be null. Referenceable* referent; // nextRefPosition is the next in code order. // Note that in either case there is no need for these to be doubly linked, as they // are only traversed in the forward direction, and are not moved. RefPosition* nextRefPosition; // The remaining fields are common to both options GenTree* treeNode; unsigned int bbNum; LsraLocation nodeLocation; // Prior to the allocation pass, registerAssignment captures the valid registers // for this RefPosition. // After the allocation pass, this contains the actual assignment regMaskTP registerAssignment; RefType refType; // NOTE: C++ only packs bitfields if the base type is the same. So make all the base // NOTE: types of the logically "bool" types that follow 'unsigned char', so they match // NOTE: RefType that precedes this, and multiRegIdx can also match. // Indicates whether this ref position is to be allocated a reg only if profitable. Currently these are the // ref positions that lower/codegen has indicated as reg optional and is considered a contained memory operand if // no reg is allocated. unsigned char regOptional : 1; // Used by RefTypeDef/Use positions of a multi-reg call node. // Indicates the position of the register that this ref position refers to. // The max bits needed is based on the max value of MAX_RET_REG_COUNT // across all targets, and that is 4 on Arm. Hence the index value // would be 0..MAX_RET_REG_COUNT-1. unsigned char multiRegIdx : 2; // Last Use - this may be true for multiple RefPositions in the same Interval unsigned char lastUse : 1; // Spill and Copy info // reload indicates that the value was spilled, and must be reloaded here.
// spillAfter indicates that the value is spilled here, so a spill must be added. // singleDefSpill indicates that it is associated with a single-def var and if it // is decided to get spilled, it will be spilled at firstRefPosition def. That // way, the value on the stack will always be up-to-date and no more spills or // resolutions (from reg to stack) will be needed for such a single-def var. // copyReg indicates that the value needs to be copied to a specific register, // but that it will also retain its current assigned register. // moveReg indicates that the value needs to be moved to a different register, // and that this will be its new assigned register. // A RefPosition may have any flag individually or the following combinations: // - reload and spillAfter (i.e. it remains in memory), but not in combination with copyReg or moveReg // (reload cannot exist with copyReg or moveReg; it should be reloaded into the appropriate reg) // - spillAfter and copyReg (i.e. it must be copied to a new reg for use, but is then spilled) // - spillAfter and moveReg (i.e. it must be both spilled and moved) // NOTE: a moveReg involves an explicit move, and would usually not be needed for a fixed Reg if it is going // to be spilled, because the code generator will do the move to the fixed register, and doesn't need to // record the new register location as the new "home" location of the lclVar. However, if there is a conflicting // use at the same location (e.g. lclVar V1 is in rdx and needs to be in rcx, but V2 needs to be in rdx), then // we need an explicit move. // - copyReg and moveReg must not exist with each other. unsigned char reload : 1; unsigned char spillAfter : 1; unsigned char singleDefSpill : 1; unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be // set. unsigned char copyReg : 1; unsigned char moveReg : 1; // true if this var is moved to a new register unsigned char isPhysRegRef : 1; // true if 'referent' points to a RegRecord, false if it points to an Interval unsigned char isFixedRegRef : 1; unsigned char isLocalDefUse : 1; // delayRegFree indicates that the register should not be freed right away, but instead wait // until the next Location after it would normally be freed. This is used for the case of // non-commutative binary operators, where op2 must not be assigned the same register as // the target. We do this by not freeing it until after the target has been defined. // Another option would be to actually change the Location of the op2 use to the same // Location as the def, but then it could potentially reuse a register that has been freed // from the other source(s), e.g. if it's a lastUse or spilled. unsigned char delayRegFree : 1; // outOfOrder is marked on a (non-def) RefPosition that doesn't follow a definition of the // register currently assigned to the Interval. This happens when we use the assigned // register from a predecessor that is not the most recently allocated BasicBlock. unsigned char outOfOrder : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // If upper vector save/restore can be avoided. unsigned char skipSaveRestore : 1; #endif #ifdef DEBUG // Minimum number of registers that need to be ensured while // constraining candidates for this ref position under // LSRA stress. unsigned minRegCandidateCount; // The unique RefPosition number, equal to its index in the // refPositions list. Only used for debugging dumps.
unsigned rpNum; #endif // DEBUG RefPosition(unsigned int bbNum, LsraLocation nodeLocation, GenTree* treeNode, RefType refType) : referent(nullptr) , nextRefPosition(nullptr) , treeNode(treeNode) , bbNum(bbNum) , nodeLocation(nodeLocation) , registerAssignment(RBM_NONE) , refType(refType) , multiRegIdx(0) , lastUse(false) , reload(false) , spillAfter(false) , singleDefSpill(false) , writeThru(false) , copyReg(false) , moveReg(false) , isPhysRegRef(false) , isFixedRegRef(false) , isLocalDefUse(false) , delayRegFree(false) , outOfOrder(false) #ifdef DEBUG , minRegCandidateCount(1) , rpNum(0) #endif { } Interval* getInterval() { assert(!isPhysRegRef); return (Interval*)referent; } void setInterval(Interval* i) { referent = i; isPhysRegRef = false; } RegRecord* getReg() { assert(isPhysRegRef); return (RegRecord*)referent; } void setReg(RegRecord* r) { referent = r; isPhysRegRef = true; registerAssignment = genRegMask(r->regNum); } regNumber assignedReg() { if (registerAssignment == RBM_NONE) { return REG_NA; } return genRegNumFromMask(registerAssignment); } // Returns true if it is a reference on a gentree node. bool IsActualRef() { switch (refType) { case RefTypeDef: case RefTypeUse: #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUpperVectorSave: case RefTypeUpperVectorRestore: #endif return true; // These must always be marked RegOptional. case RefTypeExpUse: case RefTypeParamDef: case RefTypeDummyDef: case RefTypeZeroInit: assert(RegOptional()); return false; default: return false; } } bool IsPhysRegRef() { return ((refType == RefTypeFixedReg) || (refType == RefTypeKill)); } void setRegOptional(bool val) { regOptional = val; } // Returns true whether this ref position is to be allocated // a reg only if it is profitable. bool RegOptional() { // TODO-CQ: Right now if a ref position is marked as // copyreg or movereg, then it is not treated as // 'allocate if profitable'. This is an implementation // limitation that needs to be addressed. return regOptional && !copyReg && !moveReg; } void setMultiRegIdx(unsigned idx) { multiRegIdx = idx; assert(multiRegIdx == idx); } unsigned getMultiRegIdx() { return multiRegIdx; } LsraLocation getRefEndLocation() { return delayRegFree ? nodeLocation + 1 : nodeLocation; } RefPosition* getRangeEndRef() { if (lastUse || nextRefPosition == nullptr || spillAfter) { return this; } // It would seem to make sense to only return 'nextRefPosition' if it is a lastUse, // and otherwise return `lastRefPosition', but that tends to excessively lengthen // the range for heuristic purposes. // TODO-CQ: Look into how this might be improved . 
return nextRefPosition; } LsraLocation getRangeEndLocation() { return getRangeEndRef()->getRefEndLocation(); } bool isIntervalRef() { return (!IsPhysRegRef() && (referent != nullptr)); } // isFixedRefOfRegMask indicates that the RefPosition has a fixed assignment to the register // specified by the given mask bool isFixedRefOfRegMask(regMaskTP regMask) { assert(genMaxOneBit(regMask)); return (registerAssignment == regMask); } // isFixedRefOfReg indicates that the RefPosition has a fixed assignment to the given register bool isFixedRefOfReg(regNumber regNum) { return (isFixedRefOfRegMask(genRegMask(regNum))); } #ifdef DEBUG // operator= copies everything except 'rpNum', which must remain unique RefPosition& operator=(const RefPosition& rp) { unsigned rpNumSave = rpNum; memcpy(this, &rp, sizeof(rp)); rpNum = rpNumSave; return *this; } void dump(LinearScan* linearScan); #endif // DEBUG }; #ifdef DEBUG void dumpRegMask(regMaskTP regs); #endif // DEBUG /*****************************************************************************/ #endif //_LSRA_H_ /*****************************************************************************/
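As an aside on the preference-merging rules documented on Interval::mergeRegisterPreferences above, the following is a minimal standalone sketch of how those rules compose. This is not the JIT's code: regMaskTP is simplified to a plain 64-bit mask, and maxOneBit is a stand-in for genMaxOneBit.

#include <cstdint>

using regMaskTP = uint64_t;

// True if the mask has at most one bit set (stand-in for genMaxOneBit).
static bool maxOneBit(regMaskTP m)
{
    return (m & (m - 1)) == 0;
}

// Sketch of the documented merge rules: keep a non-empty intersection; never
// "or" together multi-reg (likely kill) sets; for two disjoint single-reg
// sets, take the union, narrowed to callee-save registers when requested.
regMaskTP mergePreferences(regMaskTP current, regMaskTP incoming, bool preferCalleeSave, regMaskTP calleeSaveMask)
{
    regMaskTP common = current & incoming;
    if (common != 0)
    {
        return common; // a non-empty intersection wins
    }
    if (!maxOneBit(incoming))
    {
        return incoming; // the new multi-reg set is probably a kill; keep it
    }
    if (!maxOneBit(current))
    {
        return current; // the existing multi-reg set probably reflects kills
    }
    regMaskTP merged = current | incoming; // two disjoint single-reg sets
    if (preferCalleeSave && ((merged & calleeSaveMask) != 0))
    {
        merged &= calleeSaveMask; // narrow to callee-save when requested
    }
    return merged;
}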
1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
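The check the PR text describes can be sketched as follows. This is a hedged illustration, not the actual diff: BasicBlock and BBjumpKinds are simplified stand-ins for the JIT's types, and the helper name shouldSkipUpperVectorSaveRestore is hypothetical.

// A block that ends in a throw never resumes, so the upper-vector
// save/restore pair around a call inside it can be skipped, just as it
// already is for calls that do not return (dotnet/runtime#62662).
enum BBjumpKinds
{
    BBJ_NONE,
    BBJ_RETURN,
    BBJ_THROW // block ends with 'throw'
};

struct BasicBlock
{
    BBjumpKinds bbJumpKind;
};

bool shouldSkipUpperVectorSaveRestore(const BasicBlock* block, bool callDoesNotReturn)
{
    return callDoesNotReturn || (block->bbJumpKind == BBJ_THROW);
}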
./src/coreclr/jit/lsrabuild.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval and RefPosition Building XX XX XX XX This contains the logic for constructing Intervals and RefPositions that XX XX is common across architectures. See lsra{arch}.cpp for the architecture- XX XX specific methods for building. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lsra.h" //------------------------------------------------------------------------ // RefInfoList //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfoListNode for the given GenTree node // // Notes: // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefInfoListNode* RefInfoList::removeListNode(GenTree* node) { RefInfoListNode* prevListNode = nullptr; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if (listNode->treeNode == node) { assert(listNode->ref->getMultiRegIdx() == 0); return removeListNode(listNode, prevListNode); } prevListNode = listNode; } assert(!"removeListNode didn't find the node"); unreached(); } //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfoListNode for one reg of the given multireg GenTree node // // Notes: // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx) { RefInfoListNode* prevListNode = nullptr; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if ((listNode->treeNode == node) && (listNode->ref->getMultiRegIdx() == multiRegIdx)) { return removeListNode(listNode, prevListNode); } prevListNode = listNode; } assert(!"removeListNode didn't find the node"); unreached(); } //------------------------------------------------------------------------ // RefInfoListNodePool::RefInfoListNodePool: // Creates a pool of `RefInfoListNode` values. // // Arguments: // compiler - The compiler context. // preallocate - The number of nodes to preallocate. // RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler) { if (preallocate > 0) { RefInfoListNode* preallocatedNodes = compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(preallocate); RefInfoListNode* head = preallocatedNodes; head->m_next = nullptr; for (unsigned i = 1; i < preallocate; i++) { RefInfoListNode* node = &preallocatedNodes[i]; node->m_next = head; head = node; } m_freeList = head; } } //------------------------------------------------------------------------ // RefInfoListNodePool::GetNode: Fetches an unused node from the // pool. // // Arguments: // r - The `RefPosition` for the `RefInfo` value. 
// t - The IR node for the `RefInfo` value // // Returns: // A pooled or newly-allocated `RefInfoListNode`, depending on the // contents of the pool. RefInfoListNode* RefInfoListNodePool::GetNode(RefPosition* r, GenTree* t) { RefInfoListNode* head = m_freeList; if (head == nullptr) { head = m_compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(1); } else { m_freeList = head->m_next; } head->ref = r; head->treeNode = t; head->m_next = nullptr; return head; } //------------------------------------------------------------------------ // RefInfoListNodePool::ReturnNode: Returns a node to the pool. // // Arguments: // listNode - The node to return. // void RefInfoListNodePool::ReturnNode(RefInfoListNode* listNode) { listNode->m_next = m_freeList; m_freeList = listNode; } //------------------------------------------------------------------------ // newInterval: Create a new Interval of the given RegisterType. // // Arguments: // theRegisterType - The type of Interval to create. // // TODO-Cleanup: Consider adding an overload that takes a varDsc, and can appropriately // set such fields as isStructField // Interval* LinearScan::newInterval(RegisterType theRegisterType) { intervals.emplace_back(theRegisterType, allRegs(theRegisterType)); Interval* newInt = &intervals.back(); #ifdef DEBUG newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1); #endif // DEBUG DBEXEC(VERBOSE, newInt->dump()); return newInt; } //------------------------------------------------------------------------ // newRefPositionRaw: Create a new RefPosition // // Arguments: // nodeLocation - The location of the reference. // treeNode - The GenTree of the reference. // refType - The type of reference // // Notes: // This is used to create RefPositions for both RegRecords and Intervals, // so it does only the common initialization. // RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType) { refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType); RefPosition* newRP = &refPositions.back(); #ifdef DEBUG newRP->rpNum = static_cast<unsigned>(refPositions.size() - 1); #endif // DEBUG return newRP; } //------------------------------------------------------------------------ // resolveConflictingDefAndUse: Resolve the situation where we have conflicting def and use // register requirements on a single-def, single-use interval. // // Arguments: // defRefPosition - The interval definition // useRefPosition - The (sole) interval use // // Return Value: // None. // // Assumptions: // The two RefPositions are for the same interval, which is a tree-temp. // // Notes: // We require some special handling for the case where the use is a "delayRegFree" case of a fixedReg. // In that case, if we change the registerAssignment on the useRefPosition, we will lose the fact that, // even if we assign a different register (and rely on codegen to do the copy), that fixedReg also needs // to remain busy until the Def register has been allocated. In that case, we don't allow Case 1 or Case 4 // below. // Here are the cases we consider (in this order): // 1. If the defRefPosition specifies a single register, and there are no conflicting // FixedReg uses of it between the def and use, we use that register, and the code generator // will insert the copy. Note that it cannot be in use because there is a FixedRegRef for the def. // 2.
If the useRefPosition specifies a single register, and it is not in use, and there are no // conflicting FixedReg uses of it between the def and use, we use that register, and the code generator // will insert the copy. // 3. If the defRefPosition specifies a single register (but there are conflicts, as determined // in 1.), and there are no conflicts with the useRefPosition register (if it's a single register), // we set the register requirements on the defRefPosition to the use registers, and the // code generator will insert a copy on the def. We can't rely on the code generator to put a copy // on the use if it has multiple possible candidates, as it won't know which one has been allocated. // 4. If the useRefPosition specifies a single register, and there are no conflicts with the register // on the defRefPosition, we leave the register requirements on the defRefPosition as-is, and set // the useRefPosition to the def registers, for similar reasons to case #3. // 5. If both the defRefPosition and the useRefPosition specify single registers, but both have conflicts, // we set the candidates on defRefPosition to be all regs of the appropriate type, and since they are // single registers, codegen can insert the copy. // 6. Finally, if the RefPositions specify disjoint subsets of the registers (or the use is fixed but // has a conflict), we must insert a copy. The copy will be inserted before the use if the // use is not fixed (in the fixed case, the code generator will insert the copy). // // TODO-CQ: We get bad register allocation in case #3 in the situation where no register is // available for the lifetime. We end up allocating a register that must be spilled, and it probably // won't be the register that is actually defined by the target instruction. So, we have to copy it // and THEN spill it. In this case, we should be using the def requirement. But we need to change // the interface to this method a bit to make that work (e.g. returning a candidate set to use, but // leaving the registerAssignment as-is on the def, so that if we find that we need to spill anyway // we can use the fixed-reg on the def). // void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition) { assert(!interval->isLocalVar); RefPosition* useRefPosition = defRefPosition->nextRefPosition; regMaskTP defRegAssignment = defRefPosition->registerAssignment; regMaskTP useRegAssignment = useRefPosition->registerAssignment; RegRecord* defRegRecord = nullptr; RegRecord* useRegRecord = nullptr; regNumber defReg = REG_NA; regNumber useReg = REG_NA; bool defRegConflict = ((defRegAssignment & useRegAssignment) == RBM_NONE); bool useRegConflict = defRegConflict; // If the useRefPosition is a "delayRegFree", we can't change the registerAssignment // on it, or we will fail to ensure that the fixedReg is busy at the time the target // (of the node that uses this interval) is allocated.
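// For example (an illustrative sketch, not actual IR): suppose a tree temp is defined freely but // consumed by a node whose source is fixed to RCX and marked delayRegFree because the consumer also // defines its own target at the same location. If we rewrote the use's registerAssignment away from // RCX, nothing would keep RCX busy across the consumer's def, and another Interval could be // allocated into RCX while the copy into it is still pending.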
bool canChangeUseAssignment = !useRefPosition->isFixedRegRef || !useRefPosition->delayRegFree; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CONFLICT)); if (!canChangeUseAssignment) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_FIXED_DELAY_USE)); } if (defRefPosition->isFixedRegRef && !defRegConflict) { defReg = defRefPosition->assignedReg(); defRegRecord = getRegisterRecord(defReg); if (canChangeUseAssignment) { RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition; assert(currFixedRegRefPosition != nullptr && currFixedRegRefPosition->nodeLocation == defRefPosition->nodeLocation); if (currFixedRegRefPosition->nextRefPosition == nullptr || currFixedRegRefPosition->nextRefPosition->nodeLocation > useRefPosition->getRefEndLocation()) { // This is case #1. Use the defRegAssignment INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE1)); useRefPosition->registerAssignment = defRegAssignment; return; } else { defRegConflict = true; } } } if (useRefPosition->isFixedRegRef && !useRegConflict) { useReg = useRefPosition->assignedReg(); useRegRecord = getRegisterRecord(useReg); // We know that useRefPosition is a fixed use, so the nextRefPosition must not be null. RefPosition* nextFixedRegRefPosition = useRegRecord->getNextRefPosition(); assert(nextFixedRegRefPosition != nullptr && nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation); // First, check to see if there are any conflicting FixedReg references between the def and use. if (nextFixedRegRefPosition->nodeLocation == useRefPosition->nodeLocation) { // OK, no conflicting FixedReg references. // Now, check to see whether it is currently in use. if (useRegRecord->assignedInterval != nullptr) { RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition; LsraLocation possiblyConflictingRefLocation = possiblyConflictingRef->getRefEndLocation(); if (possiblyConflictingRefLocation >= defRefPosition->nodeLocation) { useRegConflict = true; } } if (!useRegConflict) { // This is case #2. Use the useRegAssignment INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE2, interval)); defRefPosition->registerAssignment = useRegAssignment; return; } } else { useRegConflict = true; } } if (defRegRecord != nullptr && !useRegConflict) { // This is case #3. INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE3, interval)); defRefPosition->registerAssignment = useRegAssignment; return; } if (useRegRecord != nullptr && !defRegConflict && canChangeUseAssignment) { // This is case #4. INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE4, interval)); useRefPosition->registerAssignment = defRegAssignment; return; } if (defRegRecord != nullptr && useRegRecord != nullptr) { // This is case #5. 
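// Both positions are fixed and both conflict, so neither single register can be honored directly; // widening the def below to all registers of the appropriate type lets codegen insert the copies at // both ends (see case #5 in the header comment of this method).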
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE5, interval)); RegisterType regType = interval->registerType; assert((getRegisterType(interval, defRefPosition) == regType) && (getRegisterType(interval, useRefPosition) == regType)); regMaskTP candidates = allRegs(regType); defRefPosition->registerAssignment = candidates; defRefPosition->isFixedRegRef = false; return; } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE6, interval)); return; } //------------------------------------------------------------------------ // applyCalleeSaveHeuristics: Set register preferences for an interval based on the given RefPosition // // Arguments: // rp - The RefPosition of interest // // Notes: // This is slightly more general than its name implies, and updates preferences not just // for callee-save registers. // void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp) { #ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // We only use RSI and RDI for EnC code, so we don't want to favor callee-save regs. return; } #endif // TARGET_AMD64 Interval* theInterval = rp->getInterval(); #ifdef DEBUG if (!doReverseCallerCallee()) #endif // DEBUG { // Set preferences so that this register set will be preferred for earlier refs theInterval->mergeRegisterPreferences(rp->registerAssignment); } } //------------------------------------------------------------------------ // checkConflictingDefUse: Ensure that we have consistent def/use on SDSU temps. // // Arguments: // useRP - The use RefPosition of a tree temp (SDSU Interval) // // Notes: // There are a couple of cases where this may over-constrain allocation: // 1. In the case of a non-commutative rmw def (in which the rmw source must be delay-free), or // 2. In the case where the defining node requires a temp distinct from the target (also a // delay-free case). // In those cases, if we propagate a single-register restriction from the consumer to the producer // the delayed uses will not see a fixed reference in the PhysReg at that position, and may // incorrectly allocate that register. // TODO-CQ: This means that we may often require a copy at the use of this node's result. // This case could be moved to BuildRefPositionsForNode, at the point where the def RefPosition is // created, causing a RefTypeFixedReg to be added at that location. This, however, results in // more PhysReg RefPositions (a throughput impact), and a large number of diffs that require // further analysis to determine benefit. // See Issue #11274. // void LinearScan::checkConflictingDefUse(RefPosition* useRP) { assert(useRP->refType == RefTypeUse); Interval* theInterval = useRP->getInterval(); assert(!theInterval->isLocalVar); RefPosition* defRP = theInterval->firstRefPosition; // All defs must have a valid treeNode, but we check it below to be conservative. assert(defRP->treeNode != nullptr); regMaskTP prevAssignment = defRP->registerAssignment; regMaskTP newAssignment = (prevAssignment & useRP->registerAssignment); if (newAssignment != RBM_NONE) { if (!isSingleRegister(newAssignment) || !theInterval->hasInterferingUses) { defRP->registerAssignment = newAssignment; } } else { theInterval->hasConflictingDefUse = true; } } //------------------------------------------------------------------------ // associateRefPosWithInterval: Update the Interval based on the given RefPosition.
// // Arguments: // rp - The RefPosition of interest // // Notes: // This is called at the time when 'rp' has just been created, so it becomes // the nextRefPosition of the recentRefPosition, and both the recentRefPosition // and lastRefPosition of its referent. // void LinearScan::associateRefPosWithInterval(RefPosition* rp) { Referenceable* theReferent = rp->referent; if (theReferent != nullptr) { // All RefPositions except the dummy ones at the beginning of blocks if (rp->isIntervalRef()) { Interval* theInterval = rp->getInterval(); applyCalleeSaveHeuristics(rp); if (theInterval->isLocalVar) { if (RefTypeIsUse(rp->refType)) { RefPosition* const prevRP = theInterval->recentRefPosition; if ((prevRP != nullptr) && (prevRP->bbNum == rp->bbNum)) { prevRP->lastUse = false; } } rp->lastUse = (rp->refType != RefTypeExpUse) && (rp->refType != RefTypeParamDef) && (rp->refType != RefTypeZeroInit) && !extendLifetimes(); } else if (rp->refType == RefTypeUse) { checkConflictingDefUse(rp); rp->lastUse = true; } } RefPosition* prevRP = theReferent->recentRefPosition; if (prevRP != nullptr) { prevRP->nextRefPosition = rp; } else { theReferent->firstRefPosition = rp; } theReferent->recentRefPosition = rp; theReferent->lastRefPosition = rp; } else { assert((rp->refType == RefTypeBB) || (rp->refType == RefTypeKillGCRefs)); } } //--------------------------------------------------------------------------- // newRefPosition: allocate and initialize a new RefPosition. // // Arguments: // reg - reg number that identifies RegRecord to be associated // with this RefPosition // theLocation - LSRA location of RefPosition // theRefType - RefPosition type // theTreeNode - GenTree node for which this RefPosition is created // mask - Set of valid registers for this RefPosition // // Return Value: // a new RefPosition // RefPosition* LinearScan::newRefPosition( regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask) { RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType); RegRecord* regRecord = getRegisterRecord(reg); newRP->setReg(regRecord); newRP->registerAssignment = mask; newRP->setMultiRegIdx(0); newRP->setRegOptional(false); // We can't have two RefPositions on a RegRecord at the same location, unless they are different types. assert((regRecord->lastRefPosition == nullptr) || (regRecord->lastRefPosition->nodeLocation < theLocation) || (regRecord->lastRefPosition->refType != theRefType)); associateRefPosWithInterval(newRP); DBEXEC(VERBOSE, newRP->dump(this)); return newRP; } //--------------------------------------------------------------------------- // newRefPosition: allocate and initialize a new RefPosition. // // Arguments: // theInterval - interval with which this RefPosition is associated // theLocation - LSRA location of RefPosition // theRefType - RefPosition type // theTreeNode - GenTree node for which this RefPosition is created // mask - Set of valid registers for this RefPosition // multiRegIdx - register position if this RefPosition corresponds to a // multi-reg call node.
// // Return Value: // a new RefPosition // RefPosition* LinearScan::newRefPosition(Interval* theInterval, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx /* = 0 */) { if (theInterval != nullptr) { if (mask == RBM_NONE) { mask = allRegs(theInterval->registerType); } } else { assert(theRefType == RefTypeBB || theRefType == RefTypeKillGCRefs); } #ifdef DEBUG if (theInterval != nullptr && regType(theInterval->registerType) == FloatRegisterType) { // In the case we're using floating point registers we must make sure // this flag was set previously in the compiler since this will mandate // whether LSRA will take into consideration FP reg killsets. assert(compiler->compFloatingPointUsed || ((mask & RBM_FLT_CALLEE_SAVED) == 0)); } #endif // DEBUG // If this reference is constrained to a single register (and it's not a dummy // or Kill reftype already), add a RefTypeFixedReg at this location so that its // availability can be more accurately determined bool isFixedRegister = isSingleRegister(mask); bool insertFixedRef = false; if (isFixedRegister) { // Insert a RefTypeFixedReg for any normal def or use (not ParamDef or BB), // but not an internal use (it will already have a FixedRef for the def). if ((theRefType == RefTypeDef) || ((theRefType == RefTypeUse) && !theInterval->isInternal)) { insertFixedRef = true; } } if (insertFixedRef) { regNumber physicalReg = genRegNumFromMask(mask); RefPosition* pos = newRefPosition(physicalReg, theLocation, RefTypeFixedReg, nullptr, mask); assert(theInterval != nullptr); assert((allRegs(theInterval->registerType) & mask) != 0); } RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType); newRP->setInterval(theInterval); // Spill info newRP->isFixedRegRef = isFixedRegister; #ifndef TARGET_AMD64 // We don't need this for AMD64 because the PInvoke method epilog code is explicit // at register allocation time. if (theInterval != nullptr && theInterval->isLocalVar && compiler->compMethodRequiresPInvokeFrame() && theInterval->varNum == compiler->genReturnLocal) { mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME); noway_assert(mask != RBM_NONE); } #endif // !TARGET_AMD64 newRP->registerAssignment = mask; newRP->setMultiRegIdx(multiRegIdx); newRP->setRegOptional(false); associateRefPosWithInterval(newRP); if (RefTypeIsDef(newRP->refType)) { assert(theInterval != nullptr); theInterval->isSingleDef = theInterval->firstRefPosition == newRP; } DBEXEC(VERBOSE, newRP->dump(this)); return newRP; } //--------------------------------------------------------------------------- // newUseRefPosition: allocate and initialize a RefTypeUse RefPosition at currentLoc. // // Arguments: // theInterval - interval with which this RefPosition is associated // theTreeNode - GenTree node for which this RefPosition is created // mask - Set of valid registers for this RefPosition // multiRegIdx - register position if this RefPosition corresponds to a // multi-reg call node. // // Return Value: // a new RefPosition // // Notes: // If the caller knows that 'theTreeNode' is NOT a candidate local, newRefPosition // can/should be called directly. // RefPosition* LinearScan::newUseRefPosition(Interval* theInterval, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx) { GenTree* treeNode = isCandidateLocalRef(theTreeNode) ?
theTreeNode : nullptr; RefPosition* pos = newRefPosition(theInterval, currentLoc, RefTypeUse, treeNode, mask, multiRegIdx); if (theTreeNode->IsRegOptional()) { pos->setRegOptional(true); } return pos; } //------------------------------------------------------------------------ // isContainableMemoryOp: Checks whether this is a memory op that can be contained. // // Arguments: // node - the node of interest. // // Return value: // True if this will definitely be a memory reference that could be contained. // // Notes: // This differs from the isMemoryOp() method on GenTree because it checks for // the case of a doNotEnregister local. This won't include locals that // for some other reason do not become register candidates, nor those that get // spilled. // Also, because we usually call this before we redo dataflow, any new lclVars // introduced after the last dataflow analysis will not yet be marked lvTracked, // so we don't use that. // bool LinearScan::isContainableMemoryOp(GenTree* node) { if (node->isMemoryOp()) { return true; } if (node->IsLocal()) { if (!enregisterLocalVars) { return true; } const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar()); return varDsc->lvDoNotEnregister; } return false; } //------------------------------------------------------------------------ // addRefsForPhysRegMask: Adds RefPositions of the given type for all the registers in 'mask'. // // Arguments: // mask - the mask (set) of registers. // currentLoc - the location at which they should be added // refType - the type of refposition // isLastUse - true IFF this is a last use of the register // void LinearScan::addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse) { if (refType == RefTypeKill) { // The mask identifies a set of registers that will be used during // codegen. Mark these as modified here, so when we do final frame // layout, we'll know about all these registers. This is especially // important if mask contains callee-saved registers, which affect the // frame size since we need to save/restore them. In the case where we // have a copyBlk with GC pointers, we may need to call the // CORINFO_HELP_ASSIGN_BYREF helper, which kills callee-saved RSI and // RDI. If LSRA doesn't assign RSI/RDI, they wouldn't get marked as // modified until codegen, which is too late. compiler->codeGen->regSet.rsSetRegsModified(mask DEBUGARG(true)); } for (regNumber reg = REG_FIRST; mask; reg = REG_NEXT(reg), mask >>= 1) { if (mask & 1) { // This assumes that these are all "special" RefTypes that // don't need to be recorded on the tree (hence treeNode is nullptr) RefPosition* pos = newRefPosition(reg, currentLoc, refType, nullptr, genRegMask(reg)); // This MUST occupy the physical register (obviously) if (isLastUse) { pos->lastUse = true; } } } } //------------------------------------------------------------------------ // getKillSetForStoreInd: Determine the liveness kill set for a GT_STOREIND node. // If the GT_STOREIND will generate a write barrier, determine the specific kill // set required by the case-specific, platform-specific write barrier. If no // write barrier is required, the kill set will be RBM_NONE.
// // Arguments: // tree - the GT_STOREIND node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForStoreInd(GenTreeStoreInd* tree) { assert(tree->OperIs(GT_STOREIND)); regMaskTP killMask = RBM_NONE; GenTree* data = tree->Data(); GCInfo::WriteBarrierForm writeBarrierForm = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, data); if (writeBarrierForm != GCInfo::WBF_NoBarrier) { if (compiler->codeGen->genUseOptimizedWriteBarriers(writeBarrierForm)) { // We can't determine the exact helper to be used at this point, because it depends on // the allocated register for the `data` operand. However, all the (x86) optimized // helpers have the same kill set: EDX. And note that currently, only x86 can return // `true` for genUseOptimizedWriteBarriers(). killMask = RBM_CALLEE_TRASH_NOGC; } else { // Figure out which helper we're going to use, and then get the kill set for that helper. CorInfoHelpFunc helper = compiler->codeGen->genWriteBarrierHelperForWriteBarrierForm(tree, writeBarrierForm); killMask = compiler->compHelperCallKillSet(helper); } } return killMask; } //------------------------------------------------------------------------ // getKillSetForShiftRotate: Determine the liveness kill set for a shift or rotate node. // // Arguments: // shiftNode - the shift or rotate node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(shiftNode->OperIsShiftOrRotate()); GenTree* shiftBy = shiftNode->gtGetOp2(); if (!shiftBy->isContained()) { killMask = RBM_RCX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForMul: Determine the liveness kill set for a multiply node. // // Arguments: // tree - the multiply node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(mulNode->OperIsMul()); if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx())) { killMask = RBM_RAX | RBM_RDX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForModDiv: Determine the liveness kill set for a mod or div node. // // Arguments: // tree - the mod or div node as a GenTreeOp // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(node->OperIs(GT_MOD, GT_DIV, GT_UMOD, GT_UDIV)); if (!varTypeIsFloating(node->TypeGet())) { // Both RAX and RDX are killed by the operation killMask = RBM_RAX | RBM_RDX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForCall: Determine the liveness kill set for a call node. 
// // Arguments: // tree - the GenTreeCall node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call) { regMaskTP killMask = RBM_CALLEE_TRASH; #ifdef TARGET_X86 if (compiler->compFloatingPointUsed) { if (call->TypeGet() == TYP_DOUBLE) { needDoubleTmpForFPCall = true; } else if (call->TypeGet() == TYP_FLOAT) { needFloatTmpForFPCall = true; } } #endif // TARGET_X86 if (call->IsHelperCall()) { CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd); killMask = compiler->compHelperCallKillSet(helpFunc); } // if there is no FP used, we can ignore the FP kills if (!compiler->compFloatingPointUsed) { killMask &= ~RBM_FLT_CALLEE_TRASH; } #ifdef TARGET_ARM if (call->IsVirtualStub()) { killMask |= compiler->virtualStubParamInfo->GetRegMask(); } #else // !TARGET_ARM // Verify that the special virtual stub call registers are in the kill mask. // We don't just add them unconditionally to the killMask because for most architectures // they are already in the RBM_CALLEE_TRASH set, // and we don't want to introduce extra checks and calls in this hot function. assert(!call->IsVirtualStub() || ((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask())); #endif // !TARGET_ARM return killMask; } //------------------------------------------------------------------------ // getKillSetForBlockStore: Determine the liveness kill set for a block store node. // // Arguments: // tree - the block store node as a GenTreeBlk // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode) { assert(blkNode->OperIsStore()); regMaskTP killMask = RBM_NONE; if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp()) { assert(blkNode->AsObj()->GetLayout()->HasGCPtr()); killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF); } else { bool isCopyBlk = varTypeIsStruct(blkNode->Data()); switch (blkNode->gtBlkOpKind) { #ifndef TARGET_X86 case GenTreeBlk::BlkOpKindHelper: if (isCopyBlk) { killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY); } else { killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET); } break; #endif #ifdef TARGET_XARCH case GenTreeBlk::BlkOpKindRepInstr: if (isCopyBlk) { // rep movs kills RCX, RDI and RSI killMask = RBM_RCX | RBM_RDI | RBM_RSI; } else { // rep stos kills RCX and RDI. // (Note that the Data() node, if not constant, will be assigned to // RCX, but it's fine that this kills it, as the value is not available // after this node in any case.) killMask = RBM_RDI | RBM_RCX; } break; #endif case GenTreeBlk::BlkOpKindUnroll: case GenTreeBlk::BlkOpKindInvalid: // for these 'gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE break; } } return killMask; } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // getKillSetForHWIntrinsic: Determine the liveness kill set for a GT_HWINTRINSIC node.
// // Arguments: // node - the GT_HWINTRINSIC node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH switch (node->GetHWIntrinsicId()) { case NI_SSE2_MaskMove: // maskmovdqu uses edi as the implicit address register. // Although it is set as the srcCandidate on the address, if there is also a fixed // assignment for the definition of the address, resolveConflictingDefAndUse() may // change the register assignment on the def or use of a tree temp (SDSU) when there // is a conflict, and the FixedRef on edi won't be sufficient to ensure that another // Interval will not be allocated there. // Issue #17674 tracks this. killMask = RBM_EDI; break; default: // Leave killMask as RBM_NONE break; } #endif // TARGET_XARCH return killMask; } #endif // FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // getKillSetForReturn: Determine the liveness kill set for a return node. // // Arguments: // NONE (this kill set is independent of the details of the specific return.) // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForReturn() { return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE) : RBM_NONE; } //------------------------------------------------------------------------ // getKillSetForProfilerHook: Determine the liveness kill set for a profiler hook. // // Arguments: // NONE (this kill set is independent of the details of the specific node.) // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForProfilerHook() { return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL) : RBM_NONE; } #ifdef DEBUG //------------------------------------------------------------------------ // getKillSetForNode: Return the registers killed by the given tree node. // // Arguments: // tree - the tree for which the kill set is needed. // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForNode(GenTree* tree) { regMaskTP killMask = RBM_NONE; switch (tree->OperGet()) { case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: #ifdef TARGET_X86 case GT_LSH_HI: case GT_RSH_LO: #endif killMask = getKillSetForShiftRotate(tree->AsOp()); break; case GT_MUL: case GT_MULHI: #if !defined(TARGET_64BIT) || defined(TARGET_ARM64) case GT_MUL_LONG: #endif killMask = getKillSetForMul(tree->AsOp()); break; case GT_MOD: case GT_DIV: case GT_UMOD: case GT_UDIV: killMask = getKillSetForModDiv(tree->AsOp()); break; case GT_STORE_OBJ: case GT_STORE_BLK: case GT_STORE_DYN_BLK: killMask = getKillSetForBlockStore(tree->AsBlk()); break; case GT_RETURNTRAP: killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC); break; case GT_CALL: killMask = getKillSetForCall(tree->AsCall()); break; case GT_STOREIND: killMask = getKillSetForStoreInd(tree->AsStoreInd()); break; #if defined(PROFILING_SUPPORTED) // If this method requires profiler ELT hook then mark these nodes as killing // callee trash registers (excluding RAX and XMM0). The reason for this is that // profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for // more details.
case GT_RETURN: killMask = getKillSetForReturn(); break; case GT_PROF_HOOK: killMask = getKillSetForProfilerHook(); break; #endif // PROFILING_SUPPORTED #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: killMask = getKillSetForHWIntrinsic(tree->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: // for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE break; } return killMask; } #endif // DEBUG //------------------------------------------------------------------------ // buildKillPositionsForNode: // Given some tree node add refpositions for all the registers this node kills // // Arguments: // tree - the tree for which kill positions should be generated // currentLoc - the location at which the kills should be added // killMask - The mask of registers killed by this node // // Return Value: // true - kills were inserted // false - no kills were inserted // // Notes: // The return value is needed because if we have any kills, we need to make sure that // all defs are located AFTER the kills. On the other hand, if there aren't kills, // the multiple defs for a regPair are in different locations. // If we generate any kills, we will mark all currentLiveVars as being preferenced // to avoid the killed registers. This is somewhat conservative. // // This method can add kills even if killMask is RBM_NONE, if this tree is one of the // special cases that signals that we can't permit callee save registers to hold GC refs. bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask) { bool insertedKills = false; if (killMask != RBM_NONE) { addRefsForPhysRegMask(killMask, currentLoc, RefTypeKill, true); // TODO-CQ: It appears to be valuable for both fp and int registers to avoid killing the callee // save regs on infrequently executed paths. However, it results in a large number of asmDiffs, // many of which appear to be regressions (because there is more spill on the infrequently path), // but are not really because the frequent path becomes smaller. Validating these diffs will need // to be done before making this change. // Also note that we avoid setting callee-save preferences for floating point. This may need // revisiting, and note that it doesn't currently apply to SIMD types, only float or double. // if (!blockSequence[curBBSeqNum]->isRunRarely()) if (enregisterLocalVars) { VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())) { if (!VarSetOps::IsMember(compiler, largeVectorCalleeSaveCandidateVars, varIndex)) { continue; } } else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (varTypeIsFloating(varDsc) && !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex)) { continue; } Interval* interval = getIntervalForLocalVar(varIndex); const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH)); if (isCallKill) { interval->preferCalleeSave = true; } // We are more conservative about allocating callee-saves registers to write-thru vars, since // a call only requires reloading after (not spilling before). So we record (above) the fact // that we'd prefer a callee-save register, but we don't update the preferences at this point. // See the "heuristics for writeThru intervals" in 'buildIntervals()'. 
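// As a concrete (hypothetical) illustration: for an integer lclVar live across a call with // killMask == RBM_CALLEE_TRASH, 'newPreferences' below evaluates to the callee-saved integer // registers, steering the variable toward registers that survive the call.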
if (!interval->isWriteThru || !isCallKill) { regMaskTP newPreferences = allRegs(interval->registerType) & (~killMask); if (newPreferences != RBM_NONE) { interval->updateRegisterPreferences(newPreferences); } else { // If there are no callee-saved registers, the call could kill all the registers. // This is a valid state, so in that case the assert should not trigger. The RA will spill in order // to free a register later. assert(compiler->opts.compDbgEnC || (calleeSaveRegs(varDsc->lvType)) == RBM_NONE); } } } } insertedKills = true; } if (compiler->killGCRefs(tree)) { RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS)); insertedKills = true; } return insertedKills; } //------------------------------------------------------------------------ // LinearScan::isCandidateMultiRegLclVar: Check whether a MultiReg node should // remain a candidate MultiReg // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest // // Return Value: // true iff it remains a MultiReg lclVar. // // Notes: // When identifying candidates, the register allocator will only retain // promoted fields of a multi-reg local as candidates if all of its fields // are candidates. This is because of the added complexity of dealing with a // def or use of a multi-reg lclVar when only some of the fields have liveness // info. // At the time we determine whether a multi-reg lclVar can still be handled // as such, we've already completed Lowering, so during the build phase of // LSRA we have to reset the GTF_VAR_MULTIREG flag if necessary as we visit // each node. // bool LinearScan::isCandidateMultiRegLclVar(GenTreeLclVar* lclNode) { assert(compiler->lvaEnregMultiRegVars && lclNode->IsMultiReg()); LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode); assert(varDsc->lvPromoted); bool isMultiReg = (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT); if (!isMultiReg) { lclNode->ClearMultiReg(); } #ifdef DEBUG for (unsigned int i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); assert(isCandidateVar(fieldVarDsc) == isMultiReg); } #endif // DEBUG return isMultiReg; } //------------------------------------------------------------------------ // checkContainedOrCandidateLclVar: Check whether a GT_LCL_VAR node is a // candidate or contained. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest // // Return Value: // true if the node remains a candidate or is contained // false otherwise (i.e. if it will define a register) // // Notes: // We handle candidate variables differently from non-candidate ones. // If it is a candidate, we will simply add a use of it at its parent/consumer. // Otherwise, for a use we need to actually add the appropriate references for loading // or storing the variable. // // A candidate lclVar won't actually get used until the appropriate ancestor node // is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument // to a call or an orphaned dead node. // // Also, because we do containment analysis before we redo dataflow and identify register // candidates, the containment analysis only uses !lvDoNotEnregister to estimate register // candidates. // If there is a lclVar that is estimated during Lowering to be a register candidate but turns // out not to be, if a use was marked regOptional it should now be marked contained instead.
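// For example (hypothetical): a GT_LCL_VAR use marked regOptional during Lowering whose lclVar is // later marked lvDoNotEnregister arrives here as a non-candidate; clearing regOptional and marking // it contained lets codegen fold the stack access directly into the consumer.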
// bool LinearScan::checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode) { bool isCandidate; bool makeContained = false; // We shouldn't be calling this if this node was already contained. assert(!lclNode->isContained()); // If we have a multireg local, verify that its fields are still register candidates. if (lclNode->IsMultiReg()) { // Multi-reg uses must support containment, but if we have an actual multi-reg local // we don't want it to be RegOptional in fixed-use cases, so that we can ensure proper // liveness modeling (e.g. if one field is in a register required by another field, in // a RegOptional case we won't handle the conflict properly if we decide not to allocate). isCandidate = isCandidateMultiRegLclVar(lclNode); if (isCandidate) { assert(!lclNode->IsRegOptional()); } else { makeContained = true; } } else { isCandidate = compiler->lvaGetDesc(lclNode)->lvLRACandidate; makeContained = !isCandidate && lclNode->IsRegOptional(); } if (makeContained) { lclNode->ClearRegOptional(); lclNode->SetContained(); return true; } return isCandidate; } //---------------------------------------------------------------------------- // defineNewInternalTemp: Defines a ref position for an internal temp. // // Arguments: // tree - GenTree node requiring an internal register // regType - Register type // regMask - register mask of candidates for temp // RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP regMask) { Interval* current = newInterval(regType); current->isInternal = true; RefPosition* newDef = newRefPosition(current, currentLoc, RefTypeDef, tree, regMask, 0); assert(internalCount < MaxInternalCount); internalDefs[internalCount++] = newDef; return newDef; } //------------------------------------------------------------------------ // buildInternalIntRegisterDefForNode - Create an Interval for an internal int register, and a def RefPosition // // Arguments: // tree - GenTree node that needs internal registers // internalCands - The mask of valid registers // // Returns: // The def RefPosition created for this internal temp. // RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands) { // The candidate set should contain only integer registers. assert((internalCands & ~allRegs(TYP_INT)) == RBM_NONE); RefPosition* defRefPosition = defineNewInternalTemp(tree, IntRegisterType, internalCands); return defRefPosition; } //------------------------------------------------------------------------ // buildInternalFloatRegisterDefForNode - Create an Interval for an internal fp register, and a def RefPosition // // Arguments: // tree - GenTree node that needs internal registers // internalCands - The mask of valid registers // // Returns: // The def RefPosition created for this internal temp. // RefPosition* LinearScan::buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands) { // The candidate set should contain only float registers. assert((internalCands & ~allRegs(TYP_FLOAT)) == RBM_NONE); RefPosition* defRefPosition = defineNewInternalTemp(tree, FloatRegisterType, internalCands); return defRefPosition; } //------------------------------------------------------------------------ // buildInternalRegisterUses - adds use positions for internal // registers required for a tree node.
// // Notes: // During the BuildNode process, calls to buildInternalIntRegisterDefForNode and // buildInternalFloatRegisterDefForNode put new RefPositions in the 'internalDefs' // array, and increment 'internalCount'. This method must be called to add corresponding // uses. It then resets the 'internalCount' for the handling of the next node. // // If the internal registers must differ from the target register, 'setInternalRegsDelayFree' // must be set to true, so that the uses may be marked 'delayRegFree'. // Note that if a node has both float and int temps, generally the target will either be // int *or* float, and it is not really necessary to set this on the other type, but it does // no harm as it won't restrict the register selection. // void LinearScan::buildInternalRegisterUses() { assert(internalCount <= MaxInternalCount); for (int i = 0; i < internalCount; i++) { RefPosition* def = internalDefs[i]; regMaskTP mask = def->registerAssignment; RefPosition* use = newRefPosition(def->getInterval(), currentLoc, RefTypeUse, def->treeNode, mask, 0); if (setInternalRegsDelayFree) { use->delayRegFree = true; pendingDelayFree = true; } } // internalCount = 0; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE //------------------------------------------------------------------------ // makeUpperVectorInterval - Create an Interval for saving and restoring // the upper half of a large vector. // // Arguments: // varIndex - The tracked index for a large vector lclVar. // void LinearScan::makeUpperVectorInterval(unsigned varIndex) { Interval* lclVarInterval = getIntervalForLocalVar(varIndex); assert(Compiler::varTypeNeedsPartialCalleeSave(lclVarInterval->registerType)); Interval* newInt = newInterval(LargeVectorSaveType); newInt->relatedInterval = lclVarInterval; newInt->isUpperVector = true; } //------------------------------------------------------------------------ // getUpperVectorInterval - Get the Interval for saving and restoring // the upper half of a large vector. // // Arguments: // varIndex - The tracked index for a large vector lclVar. // Interval* LinearScan::getUpperVectorInterval(unsigned varIndex) { // TODO-Throughput: Consider creating a map from varIndex to upperVector interval. for (Interval& interval : intervals) { if (interval.isLocalVar) { continue; } noway_assert(interval.isUpperVector); if (interval.relatedInterval->getVarIndex(compiler) == varIndex) { return &interval; } } unreached(); } //------------------------------------------------------------------------ // buildUpperVectorSaveRefPositions - Create special RefPositions for saving // the upper half of a set of large vectors. // // Arguments: // tree - The current node being handled // currentLoc - The location of the current node // fpCalleeKillSet - The set of registers killed by this node. // // Notes: This is called by BuildDefsWithKills for any node that kills registers in the // RBM_FLT_CALLEE_TRASH set. We actually need to find any calls that kill the upper-half // of the callee-save vector registers. // But we will use as a proxy any node that kills floating point registers. // (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.) // void LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet) { if ((tree != nullptr) && tree->IsCall()) { if (tree->AsCall()->IsNoReturn()) { // No point in having vector save/restore if the call will not return.
return; } } if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, largeVectorVars)) { // We assume that the kill set includes at least some callee-trash registers, but // that it doesn't include any callee-save registers. assert((fpCalleeKillSet & RBM_FLT_CALLEE_TRASH) != RBM_NONE); assert((fpCalleeKillSet & RBM_FLT_CALLEE_SAVED) == RBM_NONE); // We only need to save the upper half of any large vector vars that are currently live. VARSET_TP liveLargeVectors(VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars)); VarSetOps::Iter iter(compiler, liveLargeVectors); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { Interval* varInterval = getIntervalForLocalVar(varIndex); if (!varInterval->isPartiallySpilled) { Interval* upperVectorInterval = getUpperVectorInterval(varIndex); RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED); varInterval->isPartiallySpilled = true; #ifdef TARGET_XARCH pos->regOptional = true; #endif } } } // For any non-lclVar intervals that are live at this point (i.e. in the DefList), we will also create // a RefTypeUpperVectorSave. For now these will all be spilled at this point, as we don't currently // have a mechanism to communicate any non-lclVar intervals that need to be restored. // TODO-CQ: We could consider adding such a mechanism, but it's unclear whether this rare // case of a large vector temp live across a call is worth the added complexity. for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end; listNode = listNode->Next()) { const GenTree* defNode = listNode->treeNode; var_types regType = defNode->TypeGet(); if (regType == TYP_STRUCT) { assert(defNode->OperIs(GT_LCL_VAR, GT_CALL)); if (defNode->OperIs(GT_LCL_VAR)) { const GenTreeLclVar* lcl = defNode->AsLclVar(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); regType = varDsc->GetRegisterType(); } else { const GenTreeCall* call = defNode->AsCall(); const CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; regType = compiler->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == Compiler::SPK_ByValueAsHfa) { regType = compiler->GetHfaType(retClsHnd); } #if defined(TARGET_ARM64) else if (howToReturnStruct == Compiler::SPK_ByValue) { // TODO-Cleanup: add a new Compiler::SPK for this case. // This is the case when 16-byte struct is returned as [x0, x1]. // We don't need a partial callee save. regType = TYP_LONG; } #endif // TARGET_ARM64 } assert((regType != TYP_STRUCT) && (regType != TYP_UNDEF)); } if (Compiler::varTypeNeedsPartialCalleeSave(regType)) { // In the rare case where such an interval is live across nested calls, we don't need to insert another. if (listNode->ref->getInterval()->recentRefPosition->refType != RefTypeUpperVectorSave) { RefPosition* pos = newRefPosition(listNode->ref->getInterval(), currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED); } } } } //------------------------------------------------------------------------ // buildUpperVectorRestoreRefPosition - Create a RefPosition for restoring // the upper half of a large vector. // // Arguments: // lclVarInterval - A lclVarInterval that is live at 'currentLoc' // currentLoc - The current location for which we're building RefPositions // node - The node, if any, that the restore would be inserted before. // If null, the restore will be inserted at the end of the block. 
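// // Notes: // A sketch of the assumed pairing: buildUpperVectorSaveRefPositions (above) sets // isPartiallySpilled on a live large-vector interval at the save point; the next use of that // lclVar (or the block boundary) then reaches this method, which creates the matching // RefTypeUpperVectorRestore and clears the flag.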
// void LinearScan::buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node) { if (lclVarInterval->isPartiallySpilled) { unsigned varIndex = lclVarInterval->getVarIndex(compiler); Interval* upperVectorInterval = getUpperVectorInterval(varIndex); RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorRestore, node, RBM_NONE); lclVarInterval->isPartiallySpilled = false; #ifdef TARGET_XARCH pos->regOptional = true; #endif } } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #ifdef DEBUG //------------------------------------------------------------------------ // ComputeOperandDstCount: computes the number of registers defined by a // node. // // For most nodes, this is simple: // - Nodes that do not produce values (e.g. stores and other void-typed // nodes) and nodes that immediately use the registers they define // produce no registers // - Nodes that are marked as defining N registers define N registers. // // For contained nodes, however, things are more complicated: for purposes // of bookkeeping, a contained node is treated as producing the transitive // closure of the registers produced by its sources. // // Arguments: // operand - The operand for which to compute a register count. // // Returns: // The number of registers defined by `operand`. // int LinearScan::ComputeOperandDstCount(GenTree* operand) { // GT_ARGPLACE is the only non-LIR node that is currently in the trees at this stage, though // note that it is not in the linear order. if (operand->OperIs(GT_ARGPLACE)) { return 0; } if (operand->isContained()) { int dstCount = 0; for (GenTree* op : operand->Operands()) { dstCount += ComputeOperandDstCount(op); } return dstCount; } if (operand->IsUnusedValue()) { // Operands that define an unused value do not produce any registers. return 0; } if (operand->IsValue()) { // Operands that are values and are not contained consume all of their operands // and produce one or more registers. return operand->GetRegisterDstCount(compiler); } else { // This must be one of the operand types that are neither contained nor produce a value. // Stores and void-typed operands may be encountered when processing call nodes, which contain // pointers to argument setup stores. assert(operand->OperIsStore() || operand->OperIsBlkOp() || operand->OperIsPutArgStk() || operand->OperIsCompare() || operand->OperIs(GT_CMP) || operand->TypeGet() == TYP_VOID); return 0; } } //------------------------------------------------------------------------ // ComputeAvailableSrcCount: computes the number of registers available as // sources for a node. // // This is simply the sum of the number of registers produced by each // operand to the node. // // Arguments: // node - The node for which to compute a source count. // // Return Value: // The number of registers available as sources for `node`. // int LinearScan::ComputeAvailableSrcCount(GenTree* node) { int numSources = 0; for (GenTree* operand : node->Operands()) { numSources += ComputeOperandDstCount(operand); } return numSources; } #endif // DEBUG //------------------------------------------------------------------------ // buildRefPositionsForNode: The main entry point for building the RefPositions // and "tree temp" Intervals for a given node. 
// // Arguments: // tree - The node for which we are building RefPositions // currentLoc - The LsraLocation of the given node // void LinearScan::buildRefPositionsForNode(GenTree* tree, LsraLocation currentLoc) { // The LIR traversal doesn't visit GT_ARGPLACE nodes. // GT_CLS_VAR nodes should have been eliminated by rationalizer. assert(tree->OperGet() != GT_ARGPLACE); assert(tree->OperGet() != GT_CLS_VAR); // The set of internal temporary registers used by this node are stored in the // gtRsvdRegs register mask. Clear it out. tree->gtRsvdRegs = RBM_NONE; #ifdef DEBUG if (VERBOSE) { dumpDefList(); compiler->gtDispTree(tree, nullptr, nullptr, true); } #endif // DEBUG if (tree->isContained()) { #ifdef TARGET_XARCH // On XArch we can have contained candidate lclVars if they are part of a RMW // address computation. In this case we need to check whether it is a last use. if (tree->IsLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0)) { LclVarDsc* const varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon()); if (isCandidateVar(varDsc)) { assert(varDsc->lvTracked); unsigned varIndex = varDsc->lvVarIndex; VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex); } } #else // TARGET_XARCH assert(!isCandidateLocalRef(tree)); #endif // TARGET_XARCH JITDUMP("Contained\n"); return; } #ifdef DEBUG // If we are constraining the registers for allocation, we will modify all the RefPositions // we've built for this node after we've created them. In order to do that, we'll remember // the last RefPosition prior to those created for this node. RefPositionIterator refPositionMark = refPositions.backPosition(); int oldDefListCount = defList.Count(); #endif // DEBUG int consume = BuildNode(tree); #ifdef DEBUG int newDefListCount = defList.Count(); // Currently produce is unused, but we need to strengthen an assert to check if produce is // as expected. See https://github.com/dotnet/runtime/issues/8678 int produce = newDefListCount - oldDefListCount; assert((consume == 0) || (ComputeAvailableSrcCount(tree) == consume)); // If we are constraining registers, modify all the RefPositions we've just built to specify the // minimum reg count required. if ((getStressLimitRegs() != LSRA_LIMIT_NONE) || (getSelectionHeuristics() != LSRA_SELECT_DEFAULT)) { // The number of registers required for a tree node is the sum of // { RefTypeUses } + { RefTypeDef for the node itself } + specialPutArgCount // This is the minimum set of registers that needs to be ensured in the candidate set of ref positions created. // // First, we count them. unsigned minRegCount = 0; RefPositionIterator iter = refPositionMark; for (iter++; iter != refPositions.end(); iter++) { RefPosition* newRefPosition = &(*iter); if (newRefPosition->isIntervalRef()) { if ((newRefPosition->refType == RefTypeUse) || ((newRefPosition->refType == RefTypeDef) && !newRefPosition->getInterval()->isInternal)) { minRegCount++; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE else if (newRefPosition->refType == RefTypeUpperVectorSave) { minRegCount++; } #endif if (newRefPosition->getInterval()->isSpecialPutArg) { minRegCount++; } } } if (tree->OperIsPutArgSplit()) { // While we have attempted to account for any "specialPutArg" defs above, we're only looking at RefPositions // created for this node. We must be defining at least one register in the PutArgSplit, so conservatively // add one less than the maximum number of register args to 'minRegCount'.
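// (Illustrative arithmetic: on a target where MAX_REG_ARG is 4 this adds 3, keeping the // stress-constrained candidate sets large enough for the remaining argument registers a // PutArgSplit may define.)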
minRegCount += MAX_REG_ARG - 1; } for (refPositionMark++; refPositionMark != refPositions.end(); refPositionMark++) { RefPosition* newRefPosition = &(*refPositionMark); unsigned minRegCountForRef = minRegCount; if (RefTypeIsUse(newRefPosition->refType) && newRefPosition->delayRegFree) { // If delayRegFree, then Use will interfere with the destination of the consuming node. // Therefore, we also need to add the kill set of the consuming node to minRegCount. // // For example consider the following IR on x86, where v01 and v02 // are method args coming in ecx and edx respectively. // GT_DIV(v01, v02) // // For GT_DIV, the minRegCount will be 3 without adding kill set of GT_DIV node. // // Assume further JitStressRegs=2, which would constrain candidates to callee trashable // regs { eax, ecx, edx } on use positions of v01 and v02. LSRA allocates ecx for v01. // The use position of v02 cannot be allocated a reg since it is marked delay-reg free and // {eax,edx} are getting killed before the def of GT_DIV. For this reason, minRegCount for // the use position of v02 also needs to take into account the kill set of its consuming node. regMaskTP killMask = getKillSetForNode(tree); if (killMask != RBM_NONE) { minRegCountForRef += genCountBits(killMask); } } else if ((newRefPosition->refType) == RefTypeDef && (newRefPosition->getInterval()->isSpecialPutArg)) { minRegCountForRef++; } newRefPosition->minRegCandidateCount = minRegCountForRef; if (newRefPosition->IsActualRef() && doReverseCallerCallee()) { Interval* interval = newRefPosition->getInterval(); regMaskTP oldAssignment = newRefPosition->registerAssignment; regMaskTP calleeSaveMask = calleeSaveRegs(interval->registerType); newRefPosition->registerAssignment = getConstrainedRegMask(oldAssignment, calleeSaveMask, minRegCountForRef); if ((newRefPosition->registerAssignment != oldAssignment) && (newRefPosition->refType == RefTypeUse) && !interval->isLocalVar) { checkConflictingDefUse(newRefPosition); } } } } #endif // DEBUG JITDUMP("\n"); } static const regNumber lsraRegOrder[] = {REG_VAR_ORDER}; const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder); static const regNumber lsraRegOrderFlt[] = {REG_VAR_ORDER_FLT}; const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt); //------------------------------------------------------------------------ // buildPhysRegRecords: Make an interval for each physical register // void LinearScan::buildPhysRegRecords() { for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* curr = &physRegs[reg]; curr->init(reg); } for (unsigned int i = 0; i < lsraRegOrderSize; i++) { regNumber reg = lsraRegOrder[i]; RegRecord* curr = &physRegs[reg]; curr->regOrder = (unsigned char)i; } for (unsigned int i = 0; i < lsraRegOrderFltSize; i++) { regNumber reg = lsraRegOrderFlt[i]; RegRecord* curr = &physRegs[reg]; curr->regOrder = (unsigned char)i; } } //------------------------------------------------------------------------ // insertZeroInitRefPositions: Handle lclVars that are live-in to the first block // // Notes: // Prior to calling this method, 'currentLiveVars' must be set to the set of register // candidate variables that are liveIn to the first block. // For each register candidate that is live-in to the first block: // - If it is a GC ref, or if compInitMem is set, a ZeroInit RefPosition will be created. // - Otherwise, it will be marked as spilled, since it will not be assigned a register // on entry and will be loaded from memory on the undefined path.
// Note that, when the compInitMem option is not set, we may encounter these on // paths that are protected by the same condition as an earlier def. However, since // we don't do the analysis to determine this - and couldn't rely on always identifying // such cases even if we tried - we must conservatively treat the undefined path as // being possible. This is a relatively rare case, so the introduced conservatism is // not expected to warrant the analysis required to determine the best placement of // an initialization. // void LinearScan::insertZeroInitRefPositions() { assert(enregisterLocalVars); #ifdef DEBUG VARSET_TP expectedLiveVars(VarSetOps::Intersection(compiler, registerCandidateVars, compiler->fgFirstBB->bbLiveIn)); assert(VarSetOps::Equal(compiler, currentLiveVars, expectedLiveVars)); #endif // DEBUG // insert defs for this, then a block boundary VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!varDsc->lvIsParam && isCandidateVar(varDsc)) { JITDUMP("V%02u was live in to first block:", compiler->lvaTrackedIndexToLclNum(varIndex)); Interval* interval = getIntervalForLocalVar(varIndex); if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())) { varDsc->lvMustInit = true; // OSR will handle init of locals and promoted fields thereof if (compiler->lvaIsOSRLocal(compiler->lvaTrackedIndexToLclNum(varIndex))) { JITDUMP(" will be initialized by OSR\n"); // setIntervalAsSpilled(interval); varDsc->lvMustInit = false; } JITDUMP(" creating ZeroInit\n"); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */, allRegs(interval->registerType)); pos->setRegOptional(true); } else { setIntervalAsSpilled(interval); JITDUMP(" marking as spilled\n"); } } } // We must also insert zero-inits for any finallyVars if they are refs or if compInitMem is true. if (compiler->lvaEnregEHVars) { VarSetOps::Iter iter(compiler, finallyVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!varDsc->lvIsParam && isCandidateVar(varDsc)) { JITDUMP("V%02u is a finally var:", compiler->lvaTrackedIndexToLclNum(varIndex)); Interval* interval = getIntervalForLocalVar(varIndex); if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())) { if (interval->recentRefPosition == nullptr) { JITDUMP(" creating ZeroInit\n"); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */, allRegs(interval->registerType)); pos->setRegOptional(true); varDsc->lvMustInit = true; } else { // We must only generate one entry RefPosition for each Interval. Since this is not // a parameter, it can't be RefTypeParamDef, so it must be RefTypeZeroInit, which // we must have generated for the live-in case above. assert(interval->recentRefPosition->refType == RefTypeZeroInit); JITDUMP(" already ZeroInited\n"); } } } } } } #if defined(UNIX_AMD64_ABI) //------------------------------------------------------------------------ // unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems. // // Arguments: // argDsc - the LclVarDsc for the argument of interest // // Notes: // See Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc) in regalloc.cpp // for how state for argument is updated for unix non-structs and Windows AMD64 structs. 
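// (Illustrative, assumed SysV classification: a 16-byte struct whose eightbytes are classified // as {SSE, INTEGER} would report a float register from GetArgReg() and an integer register from // GetOtherArgReg(); each is added to the matching rsCalleeRegArgMaskLiveIn below.)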
// void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc) { assert(varTypeIsStruct(argDsc)); RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; if ((argDsc->GetArgReg() != REG_STK) && (argDsc->GetArgReg() != REG_NA)) { if (genRegMask(argDsc->GetArgReg()) & (RBM_ALLFLOAT)) { assert(genRegMask(argDsc->GetArgReg()) & (RBM_FLTARG_REGS)); floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg()); } else { assert(genRegMask(argDsc->GetArgReg()) & (RBM_ARG_REGS)); intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg()); } } if ((argDsc->GetOtherArgReg() != REG_STK) && (argDsc->GetOtherArgReg() != REG_NA)) { if (genRegMask(argDsc->GetOtherArgReg()) & (RBM_ALLFLOAT)) { assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_FLTARG_REGS)); floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg()); } else { assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_ARG_REGS)); intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg()); } } } #endif // defined(UNIX_AMD64_ABI) //------------------------------------------------------------------------ // updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate // regState (either compiler->intRegState or compiler->floatRegState), // with the lvArgReg on "argDsc" // // Arguments: // argDsc - the argument for which the state is to be updated. // // Return Value: None // // Assumptions: // The argument is live on entry to the function // (or is untracked and therefore assumed live) // // Notes: // This relies on a method in regAlloc.cpp that is shared between LSRA // and regAlloc. It is further abstracted here because regState is updated // separately for tracked and untracked variables in LSRA. // void LinearScan::updateRegStateForArg(LclVarDsc* argDsc) { #if defined(UNIX_AMD64_ABI) // For System V AMD64 calls the argDsc can have 2 registers (for structs.) // Handle them here. if (varTypeIsStruct(argDsc)) { unixAmd64UpdateRegStateForArg(argDsc); } else #endif // defined(UNIX_AMD64_ABI) { RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; bool isFloat = emitter::isFloatReg(argDsc->GetArgReg()); if (argDsc->lvIsHfaRegArg()) { isFloat = true; } if (isFloat) { JITDUMP("Float arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg())); compiler->raUpdateRegStateForArg(floatRegState, argDsc); } else { JITDUMP("Int arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg())); #if FEATURE_MULTIREG_ARGS if (argDsc->GetOtherArgReg() != REG_NA) { JITDUMP("(second half) in reg %s\n", getRegName(argDsc->GetOtherArgReg())); } #endif // FEATURE_MULTIREG_ARGS compiler->raUpdateRegStateForArg(intRegState, argDsc); } } } //------------------------------------------------------------------------ // buildIntervals: The main entry point for building the data structures over // which we will do register allocation. 
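// In outline (a summary of the code below, not a contract):
//   1. Build (empty) records for the physical registers.
//   2. Identify register candidates and set the frame type.
//   3. Create ParamDef RefPositions for the tracked parameters.
//   4. Walk the blocks in allocation order, building RefPositions for each node.
//   5. Validate the intervals (DEBUG only).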
// void LinearScan::buildIntervals() { BasicBlock* block; JITDUMP("\nbuildIntervals ========\n"); // Build (empty) records for all of the physical registers buildPhysRegRecords(); #ifdef DEBUG if (VERBOSE) { printf("\n-----------------\n"); printf("LIVENESS:\n"); printf("-----------------\n"); for (BasicBlock* const block : compiler->Blocks()) { printf(FMT_BB " use def in out\n", block->bbNum); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n"); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n"); } } #endif // DEBUG #if DOUBLE_ALIGN // We will determine whether we should double align the frame during // identifyCandidates(), but we initially assume that we will not. doDoubleAlign = false; #endif identifyCandidates(); // Figure out if we're going to use a frame pointer. We need to do this before building // the ref positions, because those objects will embed the frame register in various register masks // if the frame pointer is not reserved. If we decide to have a frame pointer, setFrameType() will // remove the frame pointer from the masks. setFrameType(); DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_PRE)); // second part: JITDUMP("\nbuildIntervals second part ========\n"); currentLoc = 0; // TODO-Cleanup: This duplicates prior behavior where entry (ParamDef) RefPositions were // being assigned the bbNum of the last block traversed in the 2nd phase of Lowering. // Previously, the block sequencing was done for the (formerly separate) Build pass, // and the curBBNum was left as the last block sequenced. This block was then used to set the // weight for the entry (ParamDef) RefPositions. It would be logical to set this to the // normalized entry weight (compiler->fgCalledCount), but that results in a net regression. if (!blockSequencingDone) { setBlockSequence(); } // Next, create ParamDef RefPositions for all the tracked parameters, in order of their varIndex. // Assign these RefPositions to the (nonexistent) BB0. curBBNum = 0; RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; intRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE; floatRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE; for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { LclVarDsc* argDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!argDsc->lvIsParam) { continue; } // Only reserve a register if the argument is actually used. // Is it dead on entry? If compJmpOpUsed is true, then the arguments // have to be kept alive, so we have to consider it as live on entry. // Use lvRefCnt instead of checking bbLiveIn because if it's volatile we // won't have done dataflow on it, but it needs to be marked as live-in so // it will get saved in the prolog. 
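        // For example, when the method ends in a tail "jmp" (compJmpOpUsed), every argument
        // must be treated as live on entry even when lvRefCnt() == 0, so the early-out below
        // is skipped; the same applies under compDbgCode.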
if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.compDbgCode) { continue; } if (argDsc->lvIsRegArg) { updateRegStateForArg(argDsc); } if (isCandidateVar(argDsc)) { Interval* interval = getIntervalForLocalVar(varIndex); const var_types regType = argDsc->GetRegisterType(); regMaskTP mask = allRegs(regType); if (argDsc->lvIsRegArg) { // Set this interval as currently assigned to that register regNumber inArgReg = argDsc->GetArgReg(); assert(inArgReg < REG_COUNT); mask = genRegMask(inArgReg); assignPhysReg(inArgReg, interval); INDEBUG(registersToDump |= getRegMask(inArgReg, interval->registerType)); } RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask); pos->setRegOptional(true); } else if (varTypeIsStruct(argDsc->lvType)) { for (unsigned fieldVarNum = argDsc->lvFieldLclStart; fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum) { const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum); if (fieldVarDsc->lvLRACandidate) { assert(fieldVarDsc->lvTracked); Interval* interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, allRegs(TypeGet(fieldVarDsc))); pos->setRegOptional(true); } } } else { // We can overwrite the register (i.e. codegen saves it on entry) assert(argDsc->lvRefCnt() == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister || !argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode)); } } // Now set up the reg state for the non-tracked args // (We do this here because we want to generate the ParamDef RefPositions in tracked // order, so that loop doesn't hit the non-tracked args) for (unsigned argNum = 0; argNum < compiler->info.compArgsCount; argNum++) { LclVarDsc* argDsc = compiler->lvaGetDesc(argNum); if (argDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = argDsc->lvFieldLclStart; fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum); noway_assert(fieldVarDsc->lvIsParam); if (!fieldVarDsc->lvTracked && fieldVarDsc->lvIsRegArg) { updateRegStateForArg(fieldVarDsc); } } } else { noway_assert(argDsc->lvIsParam); if (!argDsc->lvTracked && argDsc->lvIsRegArg) { updateRegStateForArg(argDsc); } } } // If there is a secret stub param, it is also live in if (compiler->info.compPublishStubParam) { intRegState->rsCalleeRegArgMaskLiveIn |= RBM_SECRET_STUB_PARAM; } BasicBlock* predBlock = nullptr; BasicBlock* prevBlock = nullptr; // Initialize currentLiveVars to the empty set. We will set it to the current // live-in at the entry to each block (this will include the incoming args on // the first block). 
    VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::MakeEmpty(compiler));

    for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
    {
        JITDUMP("\nNEW BLOCK " FMT_BB "\n", block->bbNum);

        bool predBlockIsAllocated = false;
        predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
        if (predBlock != nullptr)
        {
            JITDUMP("\n\nSetting " FMT_BB " as the predecessor for determining incoming variable registers of " FMT_BB
                    "\n",
                    predBlock->bbNum, block->bbNum);
            assert(predBlock->bbNum <= bbNumMaxBeforeResolution);
            blockInfo[block->bbNum].predBBNum = predBlock->bbNum;
        }

        if (enregisterLocalVars)
        {
            VarSetOps::AssignNoCopy(compiler, currentLiveVars,
                                    VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveIn));

            if (block == compiler->fgFirstBB)
            {
                insertZeroInitRefPositions();
                // The first real location is at 1; 0 is for the entry.
                currentLoc = 1;
            }

            // For blocks that don't have EHBoundaryIn, we need DummyDefs for cases where "predBlock" isn't
            // really a predecessor.
            // Note that it's possible to have uses of uninitialized variables, in which case even the first
            // block may require DummyDefs, which we are not currently adding - this means that these variables
            // will always be considered to be in memory on entry (and reloaded when the use is encountered).
            // TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
            // variables (which may actually be initialized along the dynamically executed paths, but not
            // on all static paths), we wind up with excessive live ranges for some of these variables.

            if (!blockInfo[block->bbNum].hasEHBoundaryIn)
            {
                // Any lclVars live-in on a non-EH boundary edge are resolution candidates.
                VarSetOps::UnionD(compiler, resolutionCandidateVars, currentLiveVars);

                if (block != compiler->fgFirstBB)
                {
                    VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
                    if (predBlock != nullptr)
                    {
                        // Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
                        VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
                    }
                    // Don't create dummy defs for EH vars; we'll load them from the stack as/when needed.
                    VarSetOps::DiffD(compiler, newLiveIn, exceptVars);

                    // Create dummy def RefPositions

                    if (!VarSetOps::IsEmpty(compiler, newLiveIn))
                    {
                        // If we are using locations from a predecessor, we should never require DummyDefs.
                        assert(!predBlockIsAllocated);

                        JITDUMP("Creating dummy definitions\n");
                        VarSetOps::Iter iter(compiler, newLiveIn);
                        unsigned        varIndex = 0;
                        while (iter.NextElem(&varIndex))
                        {
                            // Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
                            LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
                            assert(isCandidateVar(varDsc));
                            Interval*    interval = getIntervalForLocalVar(varIndex);
                            RefPosition* pos      = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
                                                              allRegs(interval->registerType));
                            pos->setRegOptional(true);
                        }
                        JITDUMP("Finished creating dummy definitions\n\n");
                    }
                }
            }
        }

        // Add a dummy RefPosition to mark the block boundary.
        // Note that we do this AFTER adding the exposed uses above, because the
        // register positions for those exposed uses need to be recorded at
        // this point.
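        // The RefTypeBB RefPosition below is not associated with any Interval and has no
        // register mask; it only lets the allocator observe the block boundary (e.g. to
        // record the variable-to-register mapping) before processing the next block.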
RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE); currentLoc += 2; JITDUMP("\n"); if (firstColdLoc == MaxLocation) { if (block->isRunRarely()) { firstColdLoc = currentLoc; JITDUMP("firstColdLoc = %d\n", firstColdLoc); } } else { // TODO: We'd like to assert the following but we don't currently ensure that only // "RunRarely" blocks are contiguous. // (The funclets will generally be last, but we don't follow layout order, so we // don't have to preserve that in the block sequence.) // assert(block->isRunRarely()); } // For frame poisoning we generate code into scratch BB right after prolog since // otherwise the prolog might become too large. In this case we will put the poison immediate // into the scratch register, so it will be killed here. if (compiler->compShouldPoisonFrame() && compiler->fgFirstBBisScratch() && block == compiler->fgFirstBB) { regMaskTP killed; #if defined(TARGET_XARCH) // Poisoning uses EAX for small vars and rep stosd that kills edi, ecx and eax for large vars. killed = RBM_EDI | RBM_ECX | RBM_EAX; #else // Poisoning uses REG_SCRATCH for small vars and memset helper for big vars. killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET); #endif addRefsForPhysRegMask(killed, currentLoc + 1, RefTypeKill, true); currentLoc += 2; } LIR::Range& blockRange = LIR::AsRange(block); for (GenTree* node : blockRange) { // We increment the location of each tree node by 2 so that the node definition, if any, // is at a new location and doesn't interfere with the uses. // For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the // location by 2 for each destination register beyond the first. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG node->gtSeqNum = currentLoc; // In DEBUG, we want to set the gtRegTag to GT_REGTAG_REG, so that subsequent dumps will show the register // value. // Although this looks like a no-op it sets the tag. node->SetRegNum(node->GetRegNum()); #endif buildRefPositionsForNode(node, currentLoc); #ifdef DEBUG if (currentLoc > maxNodeLocation) { maxNodeLocation = currentLoc; } #endif // DEBUG currentLoc += 2; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // At the end of each block, create upperVectorRestores for any largeVectorVars that may be // partiallySpilled (during the build phase all intervals will be marked isPartiallySpilled if // they *may) be partially spilled at any point. if (enregisterLocalVars) { VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars); unsigned largeVectorVarIndex = 0; while (largeVectorVarsIter.NextElem(&largeVectorVarIndex)) { Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex); buildUpperVectorRestoreRefPosition(lclVarInterval, currentLoc, nullptr); } } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Note: the visited set is cleared in LinearScan::doLinearScan() markBlockVisited(block); if (!defList.IsEmpty()) { INDEBUG(dumpDefList()); assert(!"Expected empty defList at end of block"); } if (enregisterLocalVars) { // Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the // next block, or any unvisited successors. // This will address lclVars that are live on a backedge, as well as those that are kept // live at a GT_JMP. // // Blocks ending with "jmp method" are marked as BBJ_HAS_JMP, // and jmp call is represented using GT_JMP node which is a leaf node. 
// Liveness phase keeps all the arguments of the method live till the end of // block by adding them to liveout set of the block containing GT_JMP. // // The target of a GT_JMP implicitly uses all the current method arguments, however // there are no actual references to them. This can cause LSRA to assert, because // the variables are live but it sees no references. In order to correctly model the // liveness of these arguments, we add dummy exposed uses, in the same manner as for // backward branches. This will happen automatically via expUseSet. // // Note that a block ending with GT_JMP has no successors and hence the variables // for which dummy use ref positions are added are arguments of the method. VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut)); VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars); BasicBlock* nextBlock = getNextBlock(); if (nextBlock != nullptr) { VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn); } for (BasicBlock* succ : block->GetAllSuccs(compiler)) { if (VarSetOps::IsEmpty(compiler, expUseSet)) { break; } if (isBlockVisited(succ)) { continue; } VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn); } if (!VarSetOps::IsEmpty(compiler, expUseSet)) { JITDUMP("Exposed uses:"); VarSetOps::Iter iter(compiler, expUseSet); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedToVarNum[varIndex]; const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); assert(isCandidateVar(varDsc)); Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType)); pos->setRegOptional(true); JITDUMP(" V%02u", varNum); } JITDUMP("\n"); } // Clear the "last use" flag on any vars that are live-out from this block. VARSET_TP bbLiveDefs(VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveOut)); VarSetOps::Iter iter(compiler, bbLiveDefs); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedToVarNum[varIndex]; LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum); assert(isCandidateVar(varDsc)); RefPosition* const lastRP = getIntervalForLocalVar(varIndex)->lastRefPosition; // We should be able to assert that lastRP is non-null if it is live-out, but sometimes liveness // lies. if ((lastRP != nullptr) && (lastRP->bbNum == block->bbNum)) { lastRP->lastUse = false; } } #ifdef DEBUG checkLastUses(block); if (VERBOSE) { printf("use: "); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\ndef: "); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); } #endif // DEBUG } prevBlock = block; } if (enregisterLocalVars) { if (compiler->lvaKeepAliveAndReportThis()) { // If we need to KeepAliveAndReportThis, add a dummy exposed use of it at the end unsigned keepAliveVarNum = compiler->info.compThisArg; assert(compiler->info.compIsStatic == false); const LclVarDsc* varDsc = compiler->lvaGetDesc(keepAliveVarNum); if (isCandidateVar(varDsc)) { JITDUMP("Adding exposed use of this, for lvaKeepAliveAndReportThis\n"); Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType)); pos->setRegOptional(true); } } // Adjust heuristics for writeThru intervals. 
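        // Illustrative arithmetic for the heuristic below (assuming BB_UNITY_WEIGHT == 100,
        // per its usual definition): an EH var first defined as an incoming reg arg starts
        // with initialWeight = 200; if its total weighted ref count is 850, the remaining
        // weight is 850 - 200 = 650, which is <= 700 (BB_UNITY_WEIGHT * 7), so
        // preferCalleeSave is cleared below and callee-save registers are not favored.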
        if (compiler->compHndBBtabCount > 0)
        {
            VarSetOps::Iter iter(compiler, exceptVars);
            unsigned        varIndex = 0;
            while (iter.NextElem(&varIndex))
            {
                unsigned   varNum   = compiler->lvaTrackedToVarNum[varIndex];
                LclVarDsc* varDsc   = compiler->lvaGetDesc(varNum);
                Interval*  interval = getIntervalForLocalVar(varIndex);
                assert(interval->isWriteThru);
                weight_t weight = varDsc->lvRefCntWtd();

                // We'd like to only allocate registers for EH vars that have enough uses
                // to compensate for the additional registers being live (and for the possibility
                // that we may have to insert an additional copy).
                // However, we don't currently have that information available. Instead, we'll
                // aggressively assume that these vars are defined once, at their first RefPosition.
                //
                RefPosition* firstRefPosition = interval->firstRefPosition;

                // Incoming reg args are given an initial weight of 2 * BB_UNITY_WEIGHT
                // (see lvaComputeRefCounts(); this may be reviewed/changed in future).
                //
                weight_t initialWeight = (firstRefPosition->refType == RefTypeParamDef)
                                             ? (2 * BB_UNITY_WEIGHT)
                                             : blockInfo[firstRefPosition->bbNum].weight;
                weight -= initialWeight;

                // If the remaining weight is less than the initial weight, we'd like to allocate it only
                // opportunistically, but we don't currently have a mechanism to do so.
                // For now, we'll just avoid using callee-save registers if the weight is too low.
                if (interval->preferCalleeSave)
                {
                    // The benefit of a callee-save register isn't as high as it would be for a normal arg.
                    // We'll have at least the cost of saving & restoring the callee-save register,
                    // so we won't break even until we have at least 4 * BB_UNITY_WEIGHT.
                    // Given that we also don't have a good way to tell whether the variable is live
                    // across a call in the non-EH code, we'll be extra conservative about this.
                    // Note that for writeThru intervals we don't update the preferences to be only callee-save.
                    unsigned calleeSaveCount =
                        (varTypeUsesFloatReg(interval->registerType)) ? CNT_CALLEE_SAVED_FLOAT : CNT_CALLEE_ENREG;
                    if ((weight <= (BB_UNITY_WEIGHT * 7)) || varDsc->lvVarIndex >= calleeSaveCount)
                    {
                        // If this is relatively low weight, don't prefer callee-save at all.
                        interval->preferCalleeSave = false;
                    }
                    else
                    {
                        // In other cases, we'll add the callee-save regs to the preferences, but not clear
                        // the non-callee-save regs. We also handle this case specially in tryAllocateFreeReg().
                        interval->registerPreferences |= calleeSaveRegs(interval->registerType);
                    }
                }
            }
        }

#ifdef DEBUG
        if (getLsraExtendLifeTimes())
        {
            for (unsigned lclNum = 0; lclNum < compiler->lvaCount; lclNum++)
            {
                LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
                if (varDsc->lvLRACandidate)
                {
                    JITDUMP("Adding exposed use of V%02u for LsraExtendLifetimes\n", lclNum);
                    Interval*    interval = getIntervalForLocalVar(varDsc->lvVarIndex);
                    RefPosition* pos =
                        newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
                    pos->setRegOptional(true);
                }
            }
        }
#endif // DEBUG
    }

    // If the last block has successors, create a RefTypeBB to record
    // what's live
    if (prevBlock->NumSucc(compiler) > 0)
    {
        RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
    }

#ifdef DEBUG
    // Make sure we don't have any blocks that were not visited
    for (BasicBlock* const block : compiler->Blocks())
    {
        assert(isBlockVisited(block));
    }

    if (VERBOSE)
    {
        lsraDumpIntervals("BEFORE VALIDATING INTERVALS");
        dumpRefPositions("BEFORE VALIDATING INTERVALS");
    }
    validateIntervals();

#endif // DEBUG
}

#ifdef DEBUG
//------------------------------------------------------------------------
// validateIntervals: A DEBUG-only method that checks that:
// - the lclVar RefPositions do not reflect uses of undefined values
// - a singleDef interval has only its first RefPosition as a RefTypeDef
//
// TODO-Cleanup: If an undefined use is encountered, this merely prints a message,
// but it should probably assert.
//
void LinearScan::validateIntervals()
{
    if (enregisterLocalVars)
    {
        for (unsigned i = 0; i < compiler->lvaTrackedCount; i++)
        {
            if (!compiler->lvaGetDescByTrackedIndex(i)->lvLRACandidate)
            {
                continue;
            }
            Interval* interval = getIntervalForLocalVar(i);

            bool     defined      = false;
            unsigned lastUseBBNum = 0;
            JITDUMP("-----------------\n");
            for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
            {
                if (VERBOSE)
                {
                    ref->dump(this);
                }
                RefType refType = ref->refType;
                if (!defined && RefTypeIsUse(refType) && (lastUseBBNum == ref->bbNum))
                {
                    if (!ref->lastUse)
                    {
                        if (compiler->info.compMethodName != nullptr)
                        {
                            JITDUMP("%s: ", compiler->info.compMethodName);
                        }
                        JITDUMP("LocalVar V%02u: undefined use at %u\n", interval->varNum, ref->nodeLocation);
                        assert(false);
                    }
                }

                // For single-def intervals, only the first RefPosition should be a RefTypeDef.
                if (interval->isSingleDef && RefTypeIsDef(refType))
                {
                    assert(ref == interval->firstRefPosition);
                }

                // Note that there can be multiple last uses if they are on disjoint paths,
                // so we can't really check the lastUse flag
                if (ref->lastUse)
                {
                    defined      = false;
                    lastUseBBNum = ref->bbNum;
                }
                if (RefTypeIsDef(refType))
                {
                    defined = true;
                }
            }
        }
    }
}
#endif // DEBUG

#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// setTgtPref: Set a preference relationship between the given Interval
//             and a Use RefPosition.
//
// Arguments:
//     interval   - An interval whose defining instruction has tgtPrefUse as a use
//     tgtPrefUse - The use RefPosition
//
// Notes:
//     This is called when we would like tgtPrefUse and this def to get the same register.
//     This is only desirable if the use is a last use: that is always the case for a
//     non-lclVar, and for a lclVar it is indicated by GTF_VAR_DEATH on the treeNode.
//     Note that we don't yet have valid lastUse information in the RefPositions that we're building
//     (every RefPosition is set as a lastUse until we encounter a new use), so we have to rely on the treeNode.
//     This may be called for multiple uses, in which case 'interval' will only get preferenced at most
//     to the first one (if it didn't already have a 'relatedInterval').
//
void setTgtPref(Interval* interval, RefPosition* tgtPrefUse)
{
    if (tgtPrefUse != nullptr)
    {
        Interval* useInterval = tgtPrefUse->getInterval();
        if (!useInterval->isLocalVar || (tgtPrefUse->treeNode == nullptr) ||
            ((tgtPrefUse->treeNode->gtFlags & GTF_VAR_DEATH) != 0))
        {
            // Set the use interval as related to the interval we're defining.
            useInterval->assignRelatedIntervalIfUnassigned(interval);
        }
    }
}
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS

//------------------------------------------------------------------------
// BuildDef: Build a RefTypeDef RefPosition for the given node
//
// Arguments:
//    tree          - The node that defines a register
//    dstCandidates - The candidate registers for the definition
//    multiRegIdx   - The index of the definition, defaults to zero.
//                    Only non-zero for multi-reg nodes.
//
// Return Value:
//    The newly created RefPosition.
//
// Notes:
//    Adds the RefInfo for the definition to the defList.
//
RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int multiRegIdx)
{
    assert(!tree->isContained());

    if (dstCandidates != RBM_NONE)
    {
        assert((tree->GetRegNum() == REG_NA) || (dstCandidates == genRegMask(tree->GetRegByIndex(multiRegIdx))));
    }

    RegisterType type;
    if (!tree->IsMultiRegNode())
    {
        type = getDefType(tree);
    }
    else
    {
        type = tree->GetRegTypeByIndex(multiRegIdx);
    }

    if (varTypeUsesFloatReg(type))
    {
        compiler->compFloatingPointUsed = true;
    }

    Interval* interval = newInterval(type);
    if (tree->GetRegNum() != REG_NA)
    {
        if (!tree->IsMultiRegNode() || (multiRegIdx == 0))
        {
            assert((dstCandidates == RBM_NONE) || (dstCandidates == genRegMask(tree->GetRegNum())));
            dstCandidates = genRegMask(tree->GetRegNum());
        }
        else
        {
            assert(isSingleRegister(dstCandidates));
        }
    }
#ifdef TARGET_X86
    else if (varTypeIsByte(tree))
    {
        if (dstCandidates == RBM_NONE)
        {
            dstCandidates = allRegs(TYP_INT);
        }
        dstCandidates &= ~RBM_NON_BYTE_REGS;
        assert(dstCandidates != RBM_NONE);
    }
#endif // TARGET_X86
    if (pendingDelayFree)
    {
        interval->hasInterferingUses = true;
        // pendingDelayFree = false;
    }
    RefPosition* defRefPosition =
        newRefPosition(interval, currentLoc + 1, RefTypeDef, tree, dstCandidates, multiRegIdx);
    if (tree->IsUnusedValue())
    {
        defRefPosition->isLocalDefUse = true;
        defRefPosition->lastUse       = true;
    }
    else
    {
        RefInfoListNode* refInfo = listNodePool.GetNode(defRefPosition, tree);
        defList.Append(refInfo);
    }
#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
    setTgtPref(interval, tgtPrefUse);
    setTgtPref(interval, tgtPrefUse2);
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    assert(!interval->isPartiallySpilled);
#endif
    return defRefPosition;
}

//------------------------------------------------------------------------
// BuildDefs: Build one or more RefTypeDef RefPositions for the given node
//
// Arguments:
//    tree          - The node that defines a register
//    dstCount      - The number of registers defined by the node
//    dstCandidates - The candidate registers for the definition
//
// Notes:
//    Adds the RefInfo for the definitions to the defList.
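//    For example (the candidate masks are illustrative): for a node defining two registers
//    with dstCandidates == (RBM_EAX | RBM_EDX), genCountBits(dstCandidates) == dstCount, so
//    each def is pinned to one register in ascending order: def 0 gets RBM_EAX and def 1
//    gets RBM_EDX. Multi-reg calls instead query the ABI return register for each index.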
//
void LinearScan::BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates)
{
    bool fixedReg = false;
    if ((dstCount > 1) && (dstCandidates != RBM_NONE) && ((int)genCountBits(dstCandidates) == dstCount))
    {
        fixedReg = true;
    }
    const ReturnTypeDesc* retTypeDesc = nullptr;
    if (tree->IsMultiRegCall())
    {
        retTypeDesc = tree->AsCall()->GetReturnTypeDesc();
    }
    for (int i = 0; i < dstCount; i++)
    {
        regMaskTP thisDstCandidates;
        if (fixedReg)
        {
            // In the case of a multi-reg call node, we have to query the i'th position return register.
            // For all other cases of multi-reg definitions, the registers must be in sequential order.
            if (retTypeDesc != nullptr)
            {
                thisDstCandidates = genRegMask(tree->AsCall()->GetReturnTypeDesc()->GetABIReturnReg(i));
                assert((dstCandidates & thisDstCandidates) != RBM_NONE);
            }
            else
            {
                thisDstCandidates = genFindLowestBit(dstCandidates);
            }
            dstCandidates &= ~thisDstCandidates;
        }
        else
        {
            thisDstCandidates = dstCandidates;
        }
        BuildDef(tree, thisDstCandidates, i);
    }
}

//------------------------------------------------------------------------
// BuildDefsWithKills: Build one or more RefTypeDef RefPositions for the given node,
//                     as well as kills as specified by the given mask.
//
// Arguments:
//    tree          - The node that defines a register
//    dstCount      - The number of registers defined by the node
//    dstCandidates - The candidate registers for the definition
//    killMask      - The mask of registers killed by this node
//
// Notes:
//    Adds the RefInfo for the definitions to the defList.
//    The def and kill functionality is folded into a single method so that the
//    save and restores of upper vector registers can be bracketed around the def.
//
void LinearScan::BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask)
{
    assert(killMask == getKillSetForNode(tree));

    // Call this even when killMask is RBM_NONE, as we have to check for some special cases
    buildKillPositionsForNode(tree, currentLoc + 1, killMask);

    if (killMask != RBM_NONE)
    {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
        // Build RefPositions to account for the fact that, even in a callee-save register, the upper half of any large
        // vector will be killed by a call.
        // We actually need to find any calls that kill the upper-half of the callee-save vector registers.
        // But we will use as a proxy any node that kills floating point registers.
        // (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
        // We call this unconditionally for such nodes, as we will create RefPositions for any large vector tree temps
        // even if 'enregisterLocalVars' is false, or 'liveLargeVectors' is empty, though currently the allocation
        // phase will fully (rather than partially) spill those, so we don't need to build the UpperVectorRestore
        // RefPositions in that case.
        // This must be done after the kills, so that we know which large vectors are still live.
        //
        if ((killMask & RBM_FLT_CALLEE_TRASH) != RBM_NONE)
        {
            buildUpperVectorSaveRefPositions(tree, currentLoc + 1, killMask);
        }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    }

    // Now, create the Def(s)
    BuildDefs(tree, dstCount, dstCandidates);
}

//------------------------------------------------------------------------
// BuildUse: Remove the RefInfoListNode for the given multi-reg index of the given node from
//           the defList, and build a use RefPosition for the associated Interval.
// // Arguments: // operand - The node of interest // candidates - The register candidates for the use // multiRegIdx - The index of the multireg def/use // // Return Value: // The newly created use RefPosition // // Notes: // The node must not be contained, and must have been processed by buildRefPositionsForNode(). // RefPosition* LinearScan::BuildUse(GenTree* operand, regMaskTP candidates, int multiRegIdx) { assert(!operand->isContained()); Interval* interval; bool regOptional = operand->IsRegOptional(); if (isCandidateLocalRef(operand)) { interval = getIntervalForLocalVarNode(operand->AsLclVarCommon()); // We have only approximate last-use information at this point. This is because the // execution order doesn't actually reflect the true order in which the localVars // are referenced - but the order of the RefPositions will, so we recompute it after // RefPositions are built. // Use the old value for setting currentLiveVars - note that we do this with the // not-quite-correct setting of lastUse. However, this is OK because // 1) this is only for preferencing, which doesn't require strict correctness, and // 2) the cases where these out-of-order uses occur should not overlap a kill. // TODO-Throughput: clean this up once we have the execution order correct. At that point // we can update currentLiveVars at the same place that we create the RefPosition. if ((operand->gtFlags & GTF_VAR_DEATH) != 0) { unsigned varIndex = interval->getVarIndex(compiler); VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex); } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE buildUpperVectorRestoreRefPosition(interval, currentLoc, operand); #endif } else if (operand->IsMultiRegLclVar()) { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* varDsc = compiler->lvaGetDesc(operand->AsLclVar()); LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + multiRegIdx); interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex); if (operand->AsLclVar()->IsLastUse(multiRegIdx)) { VarSetOps::RemoveElemD(compiler, currentLiveVars, fieldVarDsc->lvVarIndex); } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE buildUpperVectorRestoreRefPosition(interval, currentLoc, operand); #endif } else { RefInfoListNode* refInfo = defList.removeListNode(operand, multiRegIdx); RefPosition* defRefPos = refInfo->ref; assert(defRefPos->multiRegIdx == multiRegIdx); interval = defRefPos->getInterval(); listNodePool.ReturnNode(refInfo); operand = nullptr; } RefPosition* useRefPos = newRefPosition(interval, currentLoc, RefTypeUse, operand, candidates, multiRegIdx); useRefPos->setRegOptional(regOptional); return useRefPos; } //------------------------------------------------------------------------ // BuildIndirUses: Build Use RefPositions for an indirection that might be contained // // Arguments: // indirTree - The indirection node of interest // // Return Value: // The number of source registers used by the *parent* of this node. // // Notes: // This method may only be used if the candidates are the same for all sources. 
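//    For instance, a contained address mode [base + index*4 + 16] contributes up to two
//    uses here (the base and index registers, when not themselves contained); the constant
//    scale and offset contribute none.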
// int LinearScan::BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates) { GenTree* const addr = indirTree->gtOp1; return BuildAddrUses(addr, candidates); } int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates) { if (!addr->isContained()) { BuildUse(addr, candidates); return 1; } if (!addr->OperIs(GT_LEA)) { return 0; } GenTreeAddrMode* const addrMode = addr->AsAddrMode(); unsigned srcCount = 0; if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) { BuildUse(addrMode->Base(), candidates); srcCount++; } if (addrMode->Index() != nullptr) { if (!addrMode->Index()->isContained()) { BuildUse(addrMode->Index(), candidates); srcCount++; } #ifdef TARGET_ARM64 else if (addrMode->Index()->OperIs(GT_BFIZ)) { GenTreeCast* cast = addrMode->Index()->gtGetOp1()->AsCast(); assert(cast->isContained()); BuildUse(cast->CastOp(), candidates); srcCount++; } #endif } return srcCount; } //------------------------------------------------------------------------ // BuildOperandUses: Build Use RefPositions for an operand that might be contained. // // Arguments: // node - The node of interest // // Return Value: // The number of source registers used by the *parent* of this node. // int LinearScan::BuildOperandUses(GenTree* node, regMaskTP candidates) { if (!node->isContained()) { BuildUse(node, candidates); return 1; } #ifdef TARGET_ARM64 // Must happen before OperIsHWIntrinsic case, // but this occurs when a vector zero node is marked as contained. if (node->IsVectorZero()) { return 0; } #endif #if !defined(TARGET_64BIT) if (node->OperIs(GT_LONG)) { return BuildBinaryUses(node->AsOp(), candidates); } #endif // !defined(TARGET_64BIT) if (node->OperIsIndir()) { return BuildIndirUses(node->AsIndir(), candidates); } if (node->OperIs(GT_LEA)) { return BuildAddrUses(node, candidates); } #ifdef FEATURE_HW_INTRINSICS if (node->OperIsHWIntrinsic()) { if (node->AsHWIntrinsic()->OperIsMemoryLoad()) { return BuildAddrUses(node->AsHWIntrinsic()->Op(1)); } assert(node->AsHWIntrinsic()->GetOperandCount() == 1); BuildUse(node->AsHWIntrinsic()->Op(1), candidates); return 1; } #endif // FEATURE_HW_INTRINSICS #ifdef TARGET_ARM64 if (node->OperIs(GT_MUL)) { // Can be contained for MultiplyAdd on arm64 return BuildBinaryUses(node->AsOp(), candidates); } if (node->OperIs(GT_NEG, GT_CAST, GT_LSH)) { // GT_NEG can be contained for MultiplyAdd on arm64 // GT_CAST and GT_LSH for ADD with sign/zero extension return BuildOperandUses(node->gtGetOp1(), candidates); } #endif return 0; } //------------------------------------------------------------------------ // setDelayFree: Mark a RefPosition as delayRegFree, and set pendingDelayFree // // Arguments: // use - The use RefPosition to mark // void LinearScan::setDelayFree(RefPosition* use) { use->delayRegFree = true; pendingDelayFree = true; } //------------------------------------------------------------------------ // BuildDelayFreeUses: Build Use RefPositions for an operand that might be contained, // and which may need to be marked delayRegFree // // Arguments: // node - The node of interest // rmwNode - The node that has RMW semantics (if applicable) // candidates - The set of candidates for the uses // // Return Value: // The number of source registers used by the *parent* of this node. 
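//    As an illustrative RMW case on xarch: for "t1 = AND t1, [t2]", the address use(s) of
//    the memory operand are marked delayRegFree so they cannot share a register with the
//    def of t1; the exception is when the use and the RMW operand are the same interval
//    and one of them is a last use, in which case the register can safely be reused.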
// int LinearScan::BuildDelayFreeUses(GenTree* node, GenTree* rmwNode, regMaskTP candidates) { RefPosition* use = nullptr; Interval* rmwInterval = nullptr; bool rmwIsLastUse = false; GenTree* addr = nullptr; if ((rmwNode != nullptr) && isCandidateLocalRef(rmwNode)) { rmwInterval = getIntervalForLocalVarNode(rmwNode->AsLclVar()); // Note: we don't handle multi-reg vars here. It's not clear that there are any cases // where we'd encounter a multi-reg var in an RMW context. assert(!rmwNode->AsLclVar()->IsMultiReg()); rmwIsLastUse = rmwNode->AsLclVar()->IsLastUse(0); } if (!node->isContained()) { use = BuildUse(node, candidates); } #ifdef TARGET_ARM64 // Must happen before OperIsHWIntrinsic case, // but this occurs when a vector zero node is marked as contained. else if (node->IsVectorZero()) { return 0; } #endif #ifdef FEATURE_HW_INTRINSICS else if (node->OperIsHWIntrinsic()) { assert(node->AsHWIntrinsic()->GetOperandCount() == 1); use = BuildUse(node->AsHWIntrinsic()->Op(1), candidates); } #endif else if (!node->OperIsIndir()) { return 0; } else { GenTreeIndir* indirTree = node->AsIndir(); addr = indirTree->gtOp1; if (!addr->isContained()) { use = BuildUse(addr, candidates); } else if (!addr->OperIs(GT_LEA)) { return 0; } } if (use != nullptr) { // If node != rmwNode, then definitely node should be marked as "delayFree". // However, if node == rmwNode, then we can mark node as "delayFree" only if // none of the node/rmwNode are the last uses. If either of them are last use, // we can safely reuse the rmwNode as destination. if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } return 1; } // If we reach here we have a contained LEA in 'addr'. GenTreeAddrMode* const addrMode = addr->AsAddrMode(); unsigned srcCount = 0; if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) { use = BuildUse(addrMode->Base(), candidates); if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } srcCount++; } if ((addrMode->Index() != nullptr) && !addrMode->Index()->isContained()) { use = BuildUse(addrMode->Index(), candidates); if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } srcCount++; } return srcCount; } //------------------------------------------------------------------------ // BuildBinaryUses: Get the RefInfoListNodes for the operands of the // given node, and build uses for them. // // Arguments: // node - a GenTreeOp // // Return Value: // The number of actual register operands. // // Notes: // The operands must already have been processed by buildRefPositionsForNode, and their // RefInfoListNodes placed in the defList. // int LinearScan::BuildBinaryUses(GenTreeOp* node, regMaskTP candidates) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2IfPresent(); #ifdef TARGET_XARCH if (node->OperIsBinary() && isRMWRegOper(node)) { assert(op2 != nullptr); return BuildRMWUses(node, op1, op2, candidates); } #endif // TARGET_XARCH int srcCount = 0; if (op1 != nullptr) { srcCount += BuildOperandUses(op1, candidates); } if (op2 != nullptr) { srcCount += BuildOperandUses(op2, candidates); } return srcCount; } //------------------------------------------------------------------------ // BuildStoreLocDef: Build a definition RefPosition for a local store // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This takes an index to enable building multiple defs for a multi-reg local. 
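//    For instance (a sketch, with made-up locals): storing a multi-reg call result to a
//    promoted two-field local builds one def per field, index 0 for the first field's
//    interval and index 1 for the second, each with its own candidate set.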
// void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index) { assert(varDsc->lvTracked); unsigned varIndex = varDsc->lvVarIndex; Interval* varDefInterval = getIntervalForLocalVar(varIndex); if (!storeLoc->IsLastUse(index)) { VarSetOps::AddElemD(compiler, currentLiveVars, varIndex); } if (singleUseRef != nullptr) { Interval* srcInterval = singleUseRef->getInterval(); if (srcInterval->relatedInterval == nullptr) { // Preference the source to the dest, unless this is a non-last-use localVar. // Note that the last-use info is not correct, but it is a better approximation than preferencing // the source to the dest, if the source's lifetime extends beyond the dest. if (!srcInterval->isLocalVar || (singleUseRef->treeNode->gtFlags & GTF_VAR_DEATH) != 0) { srcInterval->assignRelatedInterval(varDefInterval); } } else if (!srcInterval->isLocalVar) { // Preference the source to dest, if src is not a local var. srcInterval->assignRelatedInterval(varDefInterval); } } regMaskTP defCandidates = RBM_NONE; var_types type = varDsc->GetRegisterType(); #ifdef TARGET_X86 if (varTypeIsByte(type)) { defCandidates = allByteRegs(); } else { defCandidates = allRegs(type); } #else defCandidates = allRegs(type); #endif // TARGET_X86 RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index); if (varDefInterval->isWriteThru) { // We always make write-thru defs reg-optional, as we can store them if they don't // get a register. def->regOptional = true; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (Compiler::varTypeNeedsPartialCalleeSave(varDefInterval->registerType)) { varDefInterval->isPartiallySpilled = false; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } //------------------------------------------------------------------------ // BuildMultiRegStoreLoc: Set register requirements for a store of a lclVar // // Arguments: // storeLoc - the multireg local store (GT_STORE_LCL_VAR) // // Returns: // The number of source registers read. // int LinearScan::BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc) { GenTree* op1 = storeLoc->gtGetOp1(); unsigned int dstCount = storeLoc->GetFieldCount(compiler); unsigned int srcCount = dstCount; LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc); assert(compiler->lvaEnregMultiRegVars); assert(storeLoc->OperGet() == GT_STORE_LCL_VAR); bool isMultiRegSrc = op1->IsMultiRegNode(); // The source must be: // - a multi-reg source // - an enregisterable SIMD type, or // - in-memory local // if (isMultiRegSrc) { assert(op1->GetMultiRegCount(compiler) == srcCount); } else if (varTypeIsEnregisterable(op1)) { // Create a delay free use, as we'll have to use it to create each field RefPosition* use = BuildUse(op1, RBM_NONE); setDelayFree(use); srcCount = 1; } else { // Otherwise we must have an in-memory struct lclVar. // We will just load directly into the register allocated for this lclVar, // so we don't need to build any uses. assert(op1->OperIs(GT_LCL_VAR) && op1->isContained() && op1->TypeIs(TYP_STRUCT)); srcCount = 0; } // For multi-reg local stores of multi-reg sources, the code generator will read each source // register, and then move it, if needed, to the destination register. These nodes have // 2*N locations where N is the number of registers, so that the liveness can // be reflected accordingly. 
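    // For example (locations illustrative): a two-register store starting at location L
    // builds the use and def for field 0 at L, advances currentLoc by 2, and builds the
    // use and def for field 1 at L + 2.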
// for (unsigned int i = 0; i < dstCount; ++i) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); RefPosition* singleUseRef = nullptr; if (isMultiRegSrc) { regMaskTP srcCandidates = RBM_NONE; #ifdef TARGET_X86 var_types type = fieldVarDsc->TypeGet(); if (varTypeIsByte(type)) { srcCandidates = allByteRegs(); } #endif // TARGET_X86 singleUseRef = BuildUse(op1, srcCandidates, i); } assert(isCandidateVar(fieldVarDsc)); BuildStoreLocDef(storeLoc, fieldVarDsc, singleUseRef, i); if (isMultiRegSrc && (i < (dstCount - 1))) { currentLoc += 2; } } return srcCount; } //------------------------------------------------------------------------ // BuildStoreLoc: Set register requirements for a store of a lclVar // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This involves: // - Setting the appropriate candidates. // - Handling of contained immediates. // - Requesting an internal register for SIMD12 stores. // int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) { GenTree* op1 = storeLoc->gtGetOp1(); int srcCount; RefPosition* singleUseRef = nullptr; LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc); if (storeLoc->IsMultiRegLclVar()) { return BuildMultiRegStoreLoc(storeLoc->AsLclVar()); } // First, define internal registers. #ifdef FEATURE_SIMD if (varTypeIsSIMD(storeLoc) && !op1->IsCnsIntOrI() && (storeLoc->TypeGet() == TYP_SIMD12)) { // Need an additional register to extract upper 4 bytes of Vector3, // it has to be float for x86. buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs()); } #endif // FEATURE_SIMD // Second, use source registers. if (op1->IsMultiRegNode() && (op1->GetMultiRegCount(compiler) > 1)) { // This is the case where the source produces multiple registers. // This must be a store lclvar. assert(storeLoc->OperGet() == GT_STORE_LCL_VAR); srcCount = op1->GetMultiRegCount(compiler); for (int i = 0; i < srcCount; ++i) { BuildUse(op1, RBM_NONE, i); } #if defined(FEATURE_SIMD) && defined(TARGET_X86) if (TargetOS::IsWindows && !compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { if (varTypeIsSIMD(storeLoc) && op1->IsCall()) { // Need an additional register to create a SIMD8 from EAX/EDX without SSE4.1. buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs()); if (isCandidateVar(varDsc)) { // This internal register must be different from the target register. setInternalRegsDelayFree = true; } } } #endif // FEATURE_SIMD && TARGET_X86 } else if (op1->isContained() && op1->OperIs(GT_BITCAST)) { GenTree* bitCastSrc = op1->gtGetOp1(); RegisterType registerType = bitCastSrc->TypeGet(); singleUseRef = BuildUse(bitCastSrc, allRegs(registerType)); Interval* srcInterval = singleUseRef->getInterval(); assert(srcInterval->registerType == registerType); srcCount = 1; } #ifndef TARGET_64BIT else if (varTypeIsLong(op1)) { // GT_MUL_LONG is handled by the IsMultiRegNode case above. assert(op1->OperIs(GT_LONG)); assert(op1->isContained() && !op1->gtGetOp1()->isContained() && !op1->gtGetOp2()->isContained()); srcCount = BuildBinaryUses(op1->AsOp()); assert(srcCount == 2); } #endif // !TARGET_64BIT else if (op1->isContained()) { #ifdef TARGET_XARCH if (varTypeIsSIMD(storeLoc)) { // This is the zero-init case, and we need a register to hold the zero. // (On Arm64 we can just store REG_ZR.) 
assert(op1->IsSIMDZero()); singleUseRef = BuildUse(op1->gtGetOp1()); srcCount = 1; } else #endif { srcCount = 0; } } else { srcCount = 1; regMaskTP srcCandidates = RBM_NONE; #ifdef TARGET_X86 var_types type = varDsc->GetRegisterType(storeLoc); if (varTypeIsByte(type)) { srcCandidates = allByteRegs(); } #endif // TARGET_X86 singleUseRef = BuildUse(op1, srcCandidates); } // Third, use internal registers. #ifdef TARGET_ARM if (storeLoc->OperIs(GT_STORE_LCL_FLD) && storeLoc->AsLclFld()->IsOffsetMisaligned()) { buildInternalIntRegisterDefForNode(storeLoc); // to generate address. buildInternalIntRegisterDefForNode(storeLoc); // to move float into an int reg. if (storeLoc->TypeIs(TYP_DOUBLE)) { buildInternalIntRegisterDefForNode(storeLoc); // to move the second half into an int reg. } } #endif // TARGET_ARM #if defined(FEATURE_SIMD) || defined(TARGET_ARM) buildInternalRegisterUses(); #endif // FEATURE_SIMD || TARGET_ARM // Fourth, define destination registers. // Add the lclVar to currentLiveVars (if it will remain live) if (isCandidateVar(varDsc)) { BuildStoreLocDef(storeLoc, varDsc, singleUseRef, 0); } return srcCount; } //------------------------------------------------------------------------ // BuildSimple: Builds use RefPositions for trees requiring no special handling // // Arguments: // tree - The node of interest // // Return Value: // The number of use RefPositions created // int LinearScan::BuildSimple(GenTree* tree) { unsigned kind = tree->OperKind(); int srcCount = 0; if ((kind & GTK_LEAF) == 0) { assert((kind & GTK_SMPOP) != 0); srcCount = BuildBinaryUses(tree->AsOp()); } if (tree->IsValue()) { BuildDef(tree); } return srcCount; } //------------------------------------------------------------------------ // BuildReturn: Set the NodeInfo for a GT_RETURN. // // Arguments: // tree - The node of interest // // Return Value: // The number of sources consumed by this node. 
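//    For example, a TYP_LONG return on 32-bit targets consumes two sources, constrained
//    to RBM_LNGRET_LO and RBM_LNGRET_HI; a TYP_FLOAT return consumes one source,
//    constrained to RBM_FLOATRET.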
// int LinearScan::BuildReturn(GenTree* tree) { GenTree* op1 = tree->gtGetOp1(); #if !defined(TARGET_64BIT) if (tree->TypeGet() == TYP_LONG) { assert((op1->OperGet() == GT_LONG) && op1->isContained()); GenTree* loVal = op1->gtGetOp1(); GenTree* hiVal = op1->gtGetOp2(); BuildUse(loVal, RBM_LNGRET_LO); BuildUse(hiVal, RBM_LNGRET_HI); return 2; } else #endif // !defined(TARGET_64BIT) if ((tree->TypeGet() != TYP_VOID) && !op1->isContained()) { regMaskTP useCandidates = RBM_NONE; #if FEATURE_MULTIREG_RET #ifdef TARGET_ARM64 if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) { useCandidates = allSIMDRegs(); if (op1->OperGet() == GT_LCL_VAR) { assert(op1->TypeGet() != TYP_SIMD32); useCandidates = RBM_DOUBLERET; } BuildUse(op1, useCandidates); return 1; } #endif // TARGET_ARM64 if (varTypeIsStruct(tree)) { // op1 has to be either a lclvar or a multi-reg returning call if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) { BuildUse(op1, useCandidates); } else { noway_assert(op1->IsMultiRegCall() || op1->IsMultiRegLclVar()); int srcCount; ReturnTypeDesc nonCallRetTypeDesc; const ReturnTypeDesc* pRetTypeDesc; if (op1->OperIs(GT_CALL)) { pRetTypeDesc = op1->AsCall()->GetReturnTypeDesc(); } else { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* varDsc = compiler->lvaGetDesc(op1->AsLclVar()); nonCallRetTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(), compiler->info.compCallConv); pRetTypeDesc = &nonCallRetTypeDesc; assert(compiler->lvaGetDesc(op1->AsLclVar())->lvFieldCnt == nonCallRetTypeDesc.GetReturnRegCount()); } srcCount = pRetTypeDesc->GetReturnRegCount(); // For any source that's coming from a different register file, we need to ensure that // we reserve the specific ABI register we need. bool hasMismatchedRegTypes = false; if (op1->IsMultiRegLclVar()) { for (int i = 0; i < srcCount; i++) { RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); RegisterType dstType = regType(pRetTypeDesc->GetReturnRegType(i)); if (srcType != dstType) { hasMismatchedRegTypes = true; regMaskTP dstRegMask = genRegMask(pRetTypeDesc->GetABIReturnReg(i)); if (varTypeUsesFloatReg(dstType)) { buildInternalFloatRegisterDefForNode(tree, dstRegMask); } else { buildInternalIntRegisterDefForNode(tree, dstRegMask); } } } } for (int i = 0; i < srcCount; i++) { // We will build uses of the type of the operand registers/fields, and the codegen // for return will move as needed. if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == regType(pRetTypeDesc->GetReturnRegType(i)))) { BuildUse(op1, genRegMask(pRetTypeDesc->GetABIReturnReg(i)), i); } else { BuildUse(op1, RBM_NONE, i); } } if (hasMismatchedRegTypes) { buildInternalRegisterUses(); } return srcCount; } } else #endif // FEATURE_MULTIREG_RET { // Non-struct type return - determine useCandidates switch (tree->TypeGet()) { case TYP_VOID: useCandidates = RBM_NONE; break; case TYP_FLOAT: useCandidates = RBM_FLOATRET; break; case TYP_DOUBLE: // We ONLY want the valid double register in the RBM_DOUBLERET mask. useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); break; case TYP_LONG: useCandidates = RBM_LNGRET; break; default: useCandidates = RBM_INTRET; break; } BuildUse(op1, useCandidates); return 1; } } // No kills or defs. return 0; } //------------------------------------------------------------------------ // supportsSpecialPutArg: Determine if we can support specialPutArgs // // Return Value: // True iff specialPutArg intervals can be supported. // // Notes: // See below. 
//
bool LinearScan::supportsSpecialPutArg()
{
#if defined(DEBUG) && defined(TARGET_X86)
    // On x86, `LSRA_LIMIT_CALLER` is too restrictive to allow the use of special put args: this stress mode
    // leaves only three registers allocatable--eax, ecx, and edx--of which the latter two are also used for the
    // first two integral arguments to a call. This can leave us with too few registers to successfully allocate in
    // situations like the following:
    //
    //     t1026 =    lclVar    ref    V52 tmp35        u:3 REG NA <l:$3a1, c:$98d>
    //
    //             /--*  t1026  ref
    //     t1352 = *  putarg_reg ref    REG NA
    //
    //      t342 =    lclVar    int    V14 loc6         u:4 REG NA $50c
    //
    //      t343 =    const     int    1 REG NA $41
    //
    //             /--*  t342   int
    //             +--*  t343   int
    //      t344 = *  +         int    REG NA $495
    //
    //      t345 =    lclVar    int    V04 arg4         u:2 REG NA $100
    //
    //             /--*  t344   int
    //             +--*  t345   int
    //      t346 = *  %         int    REG NA $496
    //
    //             /--*  t346   int
    //     t1353 = *  putarg_reg int    REG NA
    //
    //     t1354 =    lclVar    ref    V52 tmp35        (last use) REG NA
    //
    //             /--*  t1354  ref
    //     t1355 = *  lea(b+0)  byref  REG NA
    //
    // Here, the first `putarg_reg` would normally be considered a special put arg, which would remove `ecx` from the
    // set of allocatable registers, leaving only `eax` and `edx`. The allocator will then fail to allocate a register
    // for the def of `t345` if arg4 is not a register candidate: the corresponding ref position will be constrained to
    // { `ecx`, `ebx`, `esi`, `edi` }, which `LSRA_LIMIT_CALLER` will further constrain to `ecx`, which will not be
    // available due to the special put arg.
    return getStressLimitRegs() != LSRA_LIMIT_CALLER;
#else
    return true;
#endif
}

//------------------------------------------------------------------------
// BuildPutArgReg: Set the NodeInfo for a PUTARG_REG.
//
// Arguments:
//    node - The PUTARG_REG node.
//
// Return Value:
//    The number of source registers used.
//
int LinearScan::BuildPutArgReg(GenTreeUnOp* node)
{
    assert(node != nullptr);
    assert(node->OperIsPutArgReg());
    regNumber argReg = node->GetRegNum();
    assert(argReg != REG_NA);
    bool     isSpecialPutArg = false;
    int      srcCount        = 1;
    GenTree* op1             = node->gtGetOp1();

    // First, handle the GT_OBJ case, which loads into the arg register
    // (so we don't set the use to prefer that register for the source address).
    if (op1->OperIs(GT_OBJ))
    {
        GenTreeObj* obj  = op1->AsObj();
        GenTree*    addr = obj->Addr();
        unsigned    size = obj->GetLayout()->GetSize();
        assert(size <= MAX_PASS_SINGLEREG_BYTES);
        if (addr->OperIsLocalAddr())
        {
            // We don't need a source register.
            assert(addr->isContained());
            srcCount = 0;
        }
        else if (!isPow2(size))
        {
            // We'll need an internal register to do the odd-size load.
            // This can only happen with integer registers.
            assert(genIsValidIntReg(argReg));
            buildInternalIntRegisterDefForNode(node);
            BuildUse(addr);
            buildInternalRegisterUses();
        }
        return srcCount;
    }

    // To avoid redundant moves, have the argument operand computed in the
    // register in which the argument is passed to the call.
    regMaskTP    argMask = genRegMask(argReg);
    RefPosition* use     = BuildUse(op1, argMask);

    if (supportsSpecialPutArg() && isCandidateLocalRef(op1) && ((op1->gtFlags & GTF_VAR_DEATH) == 0))
    {
        // This is the case for a "pass-through" copy of a lclVar. In the case where it is a non-last-use,
        // we don't want the def of the copy to kill the lclVar register, if it is assigned the same register
        // (which is actually what we hope will happen).
        JITDUMP("Setting putarg_reg as a pass-through of a non-last use lclVar\n");

        // Preference the destination to the interval of the first register defined by the first operand.
        assert(use->getInterval()->isLocalVar);
        isSpecialPutArg = true;
    }

#ifdef TARGET_ARM
    // If the type of the node is `long`, it is actually `double`: actual `long` args must
    // have been transformed into a field list with two fields.
    if (node->TypeGet() == TYP_LONG)
    {
        srcCount++;
        regMaskTP argMaskHi = genRegMask(REG_NEXT(argReg));
        assert(genRegArgNext(argReg) == REG_NEXT(argReg));
        use = BuildUse(op1, argMaskHi, 1);
        BuildDef(node, argMask, 0);
        BuildDef(node, argMaskHi, 1);
    }
    else
#endif // TARGET_ARM
    {
        RefPosition* def = BuildDef(node, argMask);
        if (isSpecialPutArg)
        {
            def->getInterval()->isSpecialPutArg = true;
            def->getInterval()->assignRelatedInterval(use->getInterval());
        }
    }
    return srcCount;
}

//------------------------------------------------------------------------
// HandleFloatVarArgs: Handle additional register requirements for a varargs call
//
// Arguments:
//    call                - The call node of interest
//    argNode             - The current argument
//    callHasFloatRegArgs - Set to true if this argument is passed in an FP register
//
// Return Value:
//    None.
//
// Notes:
//    In the case of a varargs call, the ABI dictates that if we have floating point args,
//    we must pass the enregistered arguments in both the integer and floating point registers.
//    Since the integer register is not associated with the arg node, we will reserve it as
//    an internal register on the call so that it is not used during the evaluation of the call node
//    (e.g. for the target).
void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
{
    if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
    {
        *callHasFloatRegArgs = true;

        // We'll have to return the internal def and then later create a use for it.
        regNumber argReg    = argNode->GetRegNum();
        regNumber targetReg = compiler->getCallArgIntRegister(argReg);
        buildInternalIntRegisterDefForNode(call, genRegMask(targetReg));
    }
}

//------------------------------------------------------------------------
// BuildGCWriteBarrier: Handle additional register requirements for a GC write barrier
//
// Arguments:
//    tree - The STORE_IND for which a write barrier is required
//
// Return Value:
//    The number of sources used (2).
//
int LinearScan::BuildGCWriteBarrier(GenTree* tree)
{
    GenTree* addr = tree->gtGetOp1();
    GenTree* src  = tree->gtGetOp2();

    // In the case where we are doing a helper assignment, even if the dst
    // is an indir through an lea, we need to actually instantiate the
    // lea in a register
    assert(!addr->isContained() && !src->isContained());
    regMaskTP addrCandidates = RBM_ARG_0;
    regMaskTP srcCandidates  = RBM_ARG_1;

#if defined(TARGET_ARM64)

    // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST)
    // the 'src' goes into x15 (REG_WRITE_BARRIER_SRC)
    //
    addrCandidates = RBM_WRITE_BARRIER_DST;
    srcCandidates  = RBM_WRITE_BARRIER_SRC;

#elif defined(TARGET_X86) && NOGC_WRITE_BARRIERS

    bool useOptimizedWriteBarrierHelper = compiler->codeGen->genUseOptimizedWriteBarriers(tree, src);
    if (useOptimizedWriteBarrierHelper)
    {
        // Special write barrier:
        // op1 (addr) goes into REG_WRITE_BARRIER (rdx) and
        // op2 (src) goes into any int register.
addrCandidates = RBM_WRITE_BARRIER; srcCandidates = RBM_WRITE_BARRIER_SRC; } #endif // defined(TARGET_X86) && NOGC_WRITE_BARRIERS BuildUse(addr, addrCandidates); BuildUse(src, srcCandidates); regMaskTP killMask = getKillSetForStoreInd(tree->AsStoreInd()); buildKillPositionsForNode(tree, currentLoc + 1, killMask); return 2; } //------------------------------------------------------------------------ // BuildCmp: Set the register requirements for a compare. // // Arguments: // tree - The node of interest // // Return Value: // None. // int LinearScan::BuildCmp(GenTree* tree) { assert(tree->OperIsCompare() || tree->OperIs(GT_CMP) || tree->OperIs(GT_JCMP)); regMaskTP dstCandidates = RBM_NONE; regMaskTP op1Candidates = RBM_NONE; regMaskTP op2Candidates = RBM_NONE; GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); #ifdef TARGET_X86 // If the compare is used by a jump, we just need to set the condition codes. If not, then we need // to store the result into the low byte of a register, which requires the dst be a byteable register. if (tree->TypeGet() != TYP_VOID) { dstCandidates = allByteRegs(); } bool needByteRegs = false; if (varTypeIsByte(tree)) { if (!varTypeIsFloating(op1)) { needByteRegs = true; } } // Example1: GT_EQ(int, op1 of type ubyte, op2 of type ubyte) - in this case codegen uses // ubyte as the result of comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. Here is an example of generated code: // cmp dl, byte ptr[addr mode] // movzx edx, dl else if (varTypeIsByte(op1) && varTypeIsByte(op2)) { needByteRegs = true; } // Example2: GT_EQ(int, op1 of type ubyte, op2 is GT_CNS_INT) - in this case codegen uses // ubyte as the result of the comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. else if (varTypeIsByte(op1) && op2->IsCnsIntOrI()) { needByteRegs = true; } // Example3: GT_EQ(int, op1 is GT_CNS_INT, op2 of type ubyte) - in this case codegen uses // ubyte as the result of the comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. else if (op1->IsCnsIntOrI() && varTypeIsByte(op2)) { needByteRegs = true; } if (needByteRegs) { if (!op1->isContained()) { op1Candidates = allByteRegs(); } if (!op2->isContained()) { op2Candidates = allByteRegs(); } } #endif // TARGET_X86 int srcCount = BuildOperandUses(op1, op1Candidates); srcCount += BuildOperandUses(op2, op2Candidates); if (tree->TypeGet() != TYP_VOID) { BuildDef(tree, dstCandidates); } return srcCount; }
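
// Editor's note (illustrative sketch, not part of the JIT sources): the byteable-register
// requirement in BuildCmp above stems from the x86 'setcc' instruction, which writes only
// the low 8 bits of its destination, and only eax/ebx/ecx/edx expose an addressable low
// byte. A materialized compare therefore lowers to a pattern like:
//
//   cmp   ecx, edx     ; set the condition codes
//   sete  al           ; requires a byteable register (al/bl/cl/dl)
//   movzx eax, al      ; zero-extend the 8-bit result to TYP_INT size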
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval and RefPosition Building XX XX XX XX This contains the logic for constructing Intervals and RefPositions that XX XX is common across architectures. See lsra{arch}.cpp for the architecture- XX XX specific methods for building. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lsra.h" //------------------------------------------------------------------------ // RefInfoList //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfoListNode for the given GenTree node // // Notes: // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefInfoListNode* RefInfoList::removeListNode(GenTree* node) { RefInfoListNode* prevListNode = nullptr; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if (listNode->treeNode == node) { assert(listNode->ref->getMultiRegIdx() == 0); return removeListNode(listNode, prevListNode); } prevListNode = listNode; } assert(!"removeListNode didn't find the node"); unreached(); } //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfoListNode for one reg of the given multireg GenTree node // // Notes: // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx) { RefInfoListNode* prevListNode = nullptr; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if ((listNode->treeNode == node) && (listNode->ref->getMultiRegIdx() == multiRegIdx)) { return removeListNode(listNode, prevListNode); } prevListNode = listNode; } assert(!"removeListNode didn't find the node"); unreached(); } //------------------------------------------------------------------------ // RefInfoListNodePool::RefInfoListNodePool: // Creates a pool of `RefInfoListNode` values. // // Arguments: // compiler - The compiler context. // preallocate - The number of nodes to preallocate. // RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler) { if (preallocate > 0) { RefInfoListNode* preallocatedNodes = compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(preallocate); RefInfoListNode* head = preallocatedNodes; head->m_next = nullptr; for (unsigned i = 1; i < preallocate; i++) { RefInfoListNode* node = &preallocatedNodes[i]; node->m_next = head; head = node; } m_freeList = head; } } //------------------------------------------------------------------------ // RefInfoListNodePool::GetNode: Fetches an unused node from the // pool. // // Arguments: // r - The `RefPosition` for the `RefInfo` value. 
//    t - The IR node for the `RefInfo` value
//
// Returns:
//    A pooled or newly-allocated `RefInfoListNode`, depending on the
//    contents of the pool.
RefInfoListNode* RefInfoListNodePool::GetNode(RefPosition* r, GenTree* t)
{
    RefInfoListNode* head = m_freeList;
    if (head == nullptr)
    {
        head = m_compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(1);
    }
    else
    {
        m_freeList = head->m_next;
    }

    head->ref      = r;
    head->treeNode = t;
    head->m_next   = nullptr;

    return head;
}

//------------------------------------------------------------------------
// RefInfoListNodePool::ReturnNode: Returns a node to the pool's free list
//                                  for reuse.
//
// Arguments:
//    listNode - The node to return.
//
void RefInfoListNodePool::ReturnNode(RefInfoListNode* listNode)
{
    listNode->m_next = m_freeList;
    m_freeList       = listNode;
}

//------------------------------------------------------------------------
// newInterval: Create a new Interval of the given RegisterType.
//
// Arguments:
//    theRegisterType - The type of Interval to create.
//
// TODO-Cleanup: Consider adding an overload that takes a varDsc, and can appropriately
//               set such fields as isStructField
//
Interval* LinearScan::newInterval(RegisterType theRegisterType)
{
    intervals.emplace_back(theRegisterType, allRegs(theRegisterType));
    Interval* newInt = &intervals.back();

#ifdef DEBUG
    newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1);
#endif // DEBUG

    DBEXEC(VERBOSE, newInt->dump());
    return newInt;
}

//------------------------------------------------------------------------
// newRefPositionRaw: Create a new RefPosition
//
// Arguments:
//    nodeLocation - The location of the reference.
//    treeNode     - The GenTree of the reference.
//    refType      - The type of reference
//
// Notes:
//    This is used to create RefPositions for both RegRecords and Intervals,
//    so it does only the common initialization.
//
RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
{
    refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType);
    RefPosition* newRP = &refPositions.back();
#ifdef DEBUG
    newRP->rpNum = static_cast<unsigned>(refPositions.size() - 1);
#endif // DEBUG
    return newRP;
}

//------------------------------------------------------------------------
// resolveConflictingDefAndUse: Resolve the situation where we have conflicting def and use
//                              register requirements on a single-def, single-use interval.
//
// Arguments:
//    defRefPosition - The interval definition
//    useRefPosition - The (sole) interval use
//
// Return Value:
//    None.
//
// Assumptions:
//    The two RefPositions are for the same interval, which is a tree-temp.
//
// Notes:
//    We require some special handling for the case where the use is a "delayRegFree" case of a fixedReg.
//    In that case, if we change the registerAssignment on the useRefPosition, we will lose the fact that,
//    even if we assign a different register (and rely on codegen to do the copy), that fixedReg also needs
//    to remain busy until the Def register has been allocated. In that case, we don't allow Case 1 or Case 4
//    below.
//    Here are the cases we consider (in this order):
//    1. If the defRefPosition specifies a single register, and there are no conflicting
//       FixedReg uses of it between the def and use, we use that register, and the code generator
//       will insert the copy. Note that it cannot be in use because there is a FixedRegRef for the def.
//    2. If the useRefPosition specifies a single register, and it is not in use, and there are no
//       conflicting FixedReg uses of it between the def and use, we use that register, and the code generator
//       will insert the copy.
//    3. If the defRefPosition specifies a single register (but there are conflicts, as determined
//       in 1.), and there are no conflicts with the useRefPosition register (if it's a single register),
//       we set the register requirements on the defRefPosition to the use registers, and the
//       code generator will insert a copy on the def. We can't rely on the code generator to put a copy
//       on the use if it has multiple possible candidates, as it won't know which one has been allocated.
//    4. If the useRefPosition specifies a single register, and there are no conflicts with the register
//       on the defRefPosition, we leave the register requirements on the defRefPosition as-is, and set
//       the useRefPosition to the def registers, for similar reasons to case #3.
//    5. If both the defRefPosition and the useRefPosition specify single registers, but both have conflicts,
//       we set the candidates on the defRefPosition to be all regs of the appropriate type, and since they are
//       single registers, codegen can insert the copy.
//    6. Finally, if the RefPositions specify disjoint subsets of the registers (or the use is fixed but
//       has a conflict), we must insert a copy. The copy will be inserted before the use if the
//       use is not fixed (in the fixed case, the code generator will insert the use).
//
// TODO-CQ: We get bad register allocation in case #3 in the situation where no register is
// available for the lifetime. We end up allocating a register that must be spilled, and it probably
// won't be the register that is actually defined by the target instruction. So, we have to copy it
// and THEN spill it. In this case, we should be using the def requirement. But we need to change
// the interface to this method a bit to make that work (e.g. returning a candidate set to use, but
// leaving the registerAssignment as-is on the def, so that if we find that we need to spill anyway
// we can use the fixed-reg on the def.
//
void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition)
{
    assert(!interval->isLocalVar);

    RefPosition* useRefPosition   = defRefPosition->nextRefPosition;
    regMaskTP    defRegAssignment = defRefPosition->registerAssignment;
    regMaskTP    useRegAssignment = useRefPosition->registerAssignment;
    RegRecord*   defRegRecord     = nullptr;
    RegRecord*   useRegRecord     = nullptr;
    regNumber    defReg           = REG_NA;
    regNumber    useReg           = REG_NA;
    bool         defRegConflict   = ((defRegAssignment & useRegAssignment) == RBM_NONE);
    bool         useRegConflict   = defRegConflict;

    // If the useRefPosition is a "delayRegFree", we can't change the registerAssignment
    // on it, or we will fail to ensure that the fixedReg is busy at the time the target
    // (of the node that uses this interval) is allocated.
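    // Editor's note (a sketch of the predicate computed below, with a hypothetical helper name):
    //
    //   bool CanChangeUse(bool isFixedRegRef, bool delayRegFree)
    //   {
    //       return !isFixedRegRef || !delayRegFree;
    //   }
    //
    // i.e. the only use whose assignment we must leave untouched is one that is both
    // fixed-reg and delay-free.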
bool canChangeUseAssignment = !useRefPosition->isFixedRegRef || !useRefPosition->delayRegFree; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CONFLICT)); if (!canChangeUseAssignment) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_FIXED_DELAY_USE)); } if (defRefPosition->isFixedRegRef && !defRegConflict) { defReg = defRefPosition->assignedReg(); defRegRecord = getRegisterRecord(defReg); if (canChangeUseAssignment) { RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition; assert(currFixedRegRefPosition != nullptr && currFixedRegRefPosition->nodeLocation == defRefPosition->nodeLocation); if (currFixedRegRefPosition->nextRefPosition == nullptr || currFixedRegRefPosition->nextRefPosition->nodeLocation > useRefPosition->getRefEndLocation()) { // This is case #1. Use the defRegAssignment INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE1)); useRefPosition->registerAssignment = defRegAssignment; return; } else { defRegConflict = true; } } } if (useRefPosition->isFixedRegRef && !useRegConflict) { useReg = useRefPosition->assignedReg(); useRegRecord = getRegisterRecord(useReg); // We know that useRefPosition is a fixed use, so the nextRefPosition must not be null. RefPosition* nextFixedRegRefPosition = useRegRecord->getNextRefPosition(); assert(nextFixedRegRefPosition != nullptr && nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation); // First, check to see if there are any conflicting FixedReg references between the def and use. if (nextFixedRegRefPosition->nodeLocation == useRefPosition->nodeLocation) { // OK, no conflicting FixedReg references. // Now, check to see whether it is currently in use. if (useRegRecord->assignedInterval != nullptr) { RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition; LsraLocation possiblyConflictingRefLocation = possiblyConflictingRef->getRefEndLocation(); if (possiblyConflictingRefLocation >= defRefPosition->nodeLocation) { useRegConflict = true; } } if (!useRegConflict) { // This is case #2. Use the useRegAssignment INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE2, interval)); defRefPosition->registerAssignment = useRegAssignment; return; } } else { useRegConflict = true; } } if (defRegRecord != nullptr && !useRegConflict) { // This is case #3. INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE3, interval)); defRefPosition->registerAssignment = useRegAssignment; return; } if (useRegRecord != nullptr && !defRegConflict && canChangeUseAssignment) { // This is case #4. INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE4, interval)); useRefPosition->registerAssignment = defRegAssignment; return; } if (defRegRecord != nullptr && useRegRecord != nullptr) { // This is case #5. 
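        // (Editor's note:) def and use each demand a single register, but the two demands
        // conflict, so we widen the def to every register of the interval's type; since
        // each end is a single register, codegen can materialize the required copies.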
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE5, interval)); RegisterType regType = interval->registerType; assert((getRegisterType(interval, defRefPosition) == regType) && (getRegisterType(interval, useRefPosition) == regType)); regMaskTP candidates = allRegs(regType); defRefPosition->registerAssignment = candidates; defRefPosition->isFixedRegRef = false; return; } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE6, interval)); return; } //------------------------------------------------------------------------ // applyCalleeSaveHeuristics: Set register preferences for an interval based on the given RefPosition // // Arguments: // rp - The RefPosition of interest // // Notes: // This is slightly more general than its name applies, and updates preferences not just // for callee-save registers. // void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp) { #ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // We only use RSI and RDI for EnC code, so we don't want to favor callee-save regs. return; } #endif // TARGET_AMD64 Interval* theInterval = rp->getInterval(); #ifdef DEBUG if (!doReverseCallerCallee()) #endif // DEBUG { // Set preferences so that this register set will be preferred for earlier refs theInterval->mergeRegisterPreferences(rp->registerAssignment); } } //------------------------------------------------------------------------ // checkConflictingDefUse: Ensure that we have consistent def/use on SDSU temps. // // Arguments: // useRP - The use RefPosition of a tree temp (SDSU Interval) // // Notes: // There are a couple of cases where this may over-constrain allocation: // 1. In the case of a non-commutative rmw def (in which the rmw source must be delay-free), or // 2. In the case where the defining node requires a temp distinct from the target (also a // delay-free case). // In those cases, if we propagate a single-register restriction from the consumer to the producer // the delayed uses will not see a fixed reference in the PhysReg at that position, and may // incorrectly allocate that register. // TODO-CQ: This means that we may often require a copy at the use of this node's result. // This case could be moved to BuildRefPositionsForNode, at the point where the def RefPosition is // created, causing a RefTypeFixedReg to be added at that location. This, however, results in // more PhysReg RefPositions (a throughput impact), and a large number of diffs that require // further analysis to determine benefit. // See Issue #11274. // void LinearScan::checkConflictingDefUse(RefPosition* useRP) { assert(useRP->refType == RefTypeUse); Interval* theInterval = useRP->getInterval(); assert(!theInterval->isLocalVar); RefPosition* defRP = theInterval->firstRefPosition; // All defs must have a valid treeNode, but we check it below to be conservative. assert(defRP->treeNode != nullptr); regMaskTP prevAssignment = defRP->registerAssignment; regMaskTP newAssignment = (prevAssignment & useRP->registerAssignment); if (newAssignment != RBM_NONE) { if (!isSingleRegister(newAssignment) || !theInterval->hasInterferingUses) { defRP->registerAssignment = newAssignment; } } else { theInterval->hasConflictingDefUse = true; } } //------------------------------------------------------------------------ // associateRefPosWithInterval: Update the Interval based on the given RefPosition. 
// // Arguments: // rp - The RefPosition of interest // // Notes: // This is called at the time when 'rp' has just been created, so it becomes // the nextRefPosition of the recentRefPosition, and both the recentRefPosition // and lastRefPosition of its referent. // void LinearScan::associateRefPosWithInterval(RefPosition* rp) { Referenceable* theReferent = rp->referent; if (theReferent != nullptr) { // All RefPositions except the dummy ones at the beginning of blocks if (rp->isIntervalRef()) { Interval* theInterval = rp->getInterval(); applyCalleeSaveHeuristics(rp); if (theInterval->isLocalVar) { if (RefTypeIsUse(rp->refType)) { RefPosition* const prevRP = theInterval->recentRefPosition; if ((prevRP != nullptr) && (prevRP->bbNum == rp->bbNum)) { prevRP->lastUse = false; } } rp->lastUse = (rp->refType != RefTypeExpUse) && (rp->refType != RefTypeParamDef) && (rp->refType != RefTypeZeroInit) && !extendLifetimes(); } else if (rp->refType == RefTypeUse) { checkConflictingDefUse(rp); rp->lastUse = true; } } RefPosition* prevRP = theReferent->recentRefPosition; if (prevRP != nullptr) { prevRP->nextRefPosition = rp; } else { theReferent->firstRefPosition = rp; } theReferent->recentRefPosition = rp; theReferent->lastRefPosition = rp; } else { assert((rp->refType == RefTypeBB) || (rp->refType == RefTypeKillGCRefs)); } } //--------------------------------------------------------------------------- // newRefPosition: allocate and initialize a new RefPosition. // // Arguments: // reg - reg number that identifies RegRecord to be associated // with this RefPosition // theLocation - LSRA location of RefPosition // theRefType - RefPosition type // theTreeNode - GenTree node for which this RefPosition is created // mask - Set of valid registers for this RefPosition // multiRegIdx - register position if this RefPosition corresponds to a // multi-reg call node. // // Return Value: // a new RefPosition // RefPosition* LinearScan::newRefPosition( regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask) { RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType); RegRecord* regRecord = getRegisterRecord(reg); newRP->setReg(regRecord); newRP->registerAssignment = mask; newRP->setMultiRegIdx(0); newRP->setRegOptional(false); // We can't have two RefPositions on a RegRecord at the same location, unless they are different types. assert((regRecord->lastRefPosition == nullptr) || (regRecord->lastRefPosition->nodeLocation < theLocation) || (regRecord->lastRefPosition->refType != theRefType)); associateRefPosWithInterval(newRP); DBEXEC(VERBOSE, newRP->dump(this)); return newRP; } //--------------------------------------------------------------------------- // newRefPosition: allocate and initialize a new RefPosition. // // Arguments: // theInterval - interval to which RefPosition is associated with. // theLocation - LSRA location of RefPosition // theRefType - RefPosition type // theTreeNode - GenTree node for which this RefPosition is created // mask - Set of valid registers for this RefPosition // multiRegIdx - register position if this RefPosition corresponds to a // multi-reg call node. 
//
// Return Value:
//     a new RefPosition
//
RefPosition* LinearScan::newRefPosition(Interval*    theInterval,
                                        LsraLocation theLocation,
                                        RefType      theRefType,
                                        GenTree*     theTreeNode,
                                        regMaskTP    mask,
                                        unsigned     multiRegIdx /* = 0 */)
{
    if (theInterval != nullptr)
    {
        if (mask == RBM_NONE)
        {
            mask = allRegs(theInterval->registerType);
        }
    }
    else
    {
        assert(theRefType == RefTypeBB || theRefType == RefTypeKillGCRefs);
    }
#ifdef DEBUG
    if (theInterval != nullptr && regType(theInterval->registerType) == FloatRegisterType)
    {
        // In the case we're using floating point registers we must make sure
        // this flag was set previously in the compiler since this will mandate
        // whether LSRA will take into consideration FP reg killsets.
        assert(compiler->compFloatingPointUsed || ((mask & RBM_FLT_CALLEE_SAVED) == 0));
    }
#endif // DEBUG

    // If this reference is constrained to a single register (and it's not a dummy
    // or Kill reftype already), add a RefTypeFixedReg at this location so that its
    // availability can be more accurately determined

    bool isFixedRegister = isSingleRegister(mask);
    bool insertFixedRef  = false;
    if (isFixedRegister)
    {
        // Insert a RefTypeFixedReg for any normal def or use (not ParamDef or BB),
        // but not an internal use (it will already have a FixedRef for the def).
        if ((theRefType == RefTypeDef) || ((theRefType == RefTypeUse) && !theInterval->isInternal))
        {
            insertFixedRef = true;
        }
    }

    if (insertFixedRef)
    {
        regNumber    physicalReg = genRegNumFromMask(mask);
        RefPosition* pos         = newRefPosition(physicalReg, theLocation, RefTypeFixedReg, nullptr, mask);
        assert(theInterval != nullptr);
        assert((allRegs(theInterval->registerType) & mask) != 0);
    }

    RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);

    newRP->setInterval(theInterval);

    // Spill info
    newRP->isFixedRegRef = isFixedRegister;

#ifndef TARGET_AMD64
    // We don't need this for AMD because the PInvoke method epilog code is explicit
    // at register allocation time.
    if (theInterval != nullptr && theInterval->isLocalVar && compiler->compMethodRequiresPInvokeFrame() &&
        theInterval->varNum == compiler->genReturnLocal)
    {
        mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
        noway_assert(mask != RBM_NONE);
    }
#endif // !TARGET_AMD64
    newRP->registerAssignment = mask;

    newRP->setMultiRegIdx(multiRegIdx);
    newRP->setRegOptional(false);

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    newRP->skipSaveRestore = false;
#endif

    associateRefPosWithInterval(newRP);

    if (RefTypeIsDef(newRP->refType))
    {
        assert(theInterval != nullptr);
        theInterval->isSingleDef = theInterval->firstRefPosition == newRP;
    }

    DBEXEC(VERBOSE, newRP->dump(this));
    return newRP;
}

//---------------------------------------------------------------------------
// newUseRefPosition: allocate and initialize a RefTypeUse RefPosition at currentLoc.
//
// Arguments:
//     theInterval  - interval with which the RefPosition is associated.
//     theTreeNode  - GenTree node for which this RefPosition is created
//     mask         - Set of valid registers for this RefPosition
//     multiRegIdx  - register position if this RefPosition corresponds to a
//                    multi-reg call node.
//
// Return Value:
//     a new RefPosition
//
// Notes:
//     If the caller knows that 'theTreeNode' is NOT a candidate local, newRefPosition
//     can/should be called directly.
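//     (Editor's note:) a non-candidate 'theTreeNode' is still safe to pass here: the
//     RefPosition is simply created with a null treeNode, and only the reg-optional
//     flag is propagated from the node.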
//
RefPosition* LinearScan::newUseRefPosition(Interval* theInterval,
                                           GenTree*  theTreeNode,
                                           regMaskTP mask,
                                           unsigned  multiRegIdx)
{
    GenTree* treeNode = isCandidateLocalRef(theTreeNode) ? theTreeNode : nullptr;

    RefPosition* pos = newRefPosition(theInterval, currentLoc, RefTypeUse, treeNode, mask, multiRegIdx);
    if (theTreeNode->IsRegOptional())
    {
        pos->setRegOptional(true);
    }
    return pos;
}

//------------------------------------------------------------------------
// isContainableMemoryOp: Checks whether this is a memory op that can be contained.
//
// Arguments:
//    node - the node of interest.
//
// Return value:
//    True if this will definitely be a memory reference that could be contained.
//
// Notes:
//    This differs from the isMemoryOp() method on GenTree because it checks for
//    the case of a doNotEnregister local. This won't include locals that
//    for some other reason do not become register candidates, nor those that get
//    spilled.
//    Also, because we usually call this before we redo dataflow, any new lclVars
//    introduced after the last dataflow analysis will not yet be marked lvTracked,
//    so we don't use that.
//
bool LinearScan::isContainableMemoryOp(GenTree* node)
{
    if (node->isMemoryOp())
    {
        return true;
    }
    if (node->IsLocal())
    {
        if (!enregisterLocalVars)
        {
            return true;
        }
        const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar());
        return varDsc->lvDoNotEnregister;
    }
    return false;
}

//------------------------------------------------------------------------
// addRefsForPhysRegMask: Adds RefPositions of the given type for all the registers in 'mask'.
//
// Arguments:
//    mask        - the mask (set) of registers.
//    currentLoc  - the location at which they should be added
//    refType     - the type of refposition
//    isLastUse   - true IFF this is a last use of the register
//
void LinearScan::addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse)
{
    if (refType == RefTypeKill)
    {
        // The mask identifies a set of registers that will be used during
        // codegen. Mark these as modified here, so when we do final frame
        // layout, we'll know about all these registers. This is especially
        // important if mask contains callee-saved registers, which affect the
        // frame size since we need to save/restore them. In the case where we
        // have a copyBlk with GC pointers, we may need to call the
        // CORINFO_HELP_ASSIGN_BYREF helper, which kills callee-saved RSI and
        // RDI; if LSRA doesn't assign RSI/RDI, they wouldn't get marked as
        // modified until codegen, which is too late.
        compiler->codeGen->regSet.rsSetRegsModified(mask DEBUGARG(true));
    }

    for (regNumber reg = REG_FIRST; mask; reg = REG_NEXT(reg), mask >>= 1)
    {
        if (mask & 1)
        {
            // This assumes that these are all "special" RefTypes that
            // don't need to be recorded on the tree (hence treeNode is nullptr)
            RefPosition* pos = newRefPosition(reg, currentLoc, refType, nullptr,
                                              genRegMask(reg)); // This MUST occupy the physical register (obviously)

            if (isLastUse)
            {
                pos->lastUse = true;
            }
        }
    }
}

//------------------------------------------------------------------------
// getKillSetForStoreInd: Determine the liveness kill set for a GT_STOREIND node.
// If the GT_STOREIND will generate a write barrier, determine the specific kill
// set required by the case-specific, platform-specific write barrier. If no
// write barrier is required, the kill set will be RBM_NONE.
// // Arguments: // tree - the GT_STOREIND node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForStoreInd(GenTreeStoreInd* tree) { assert(tree->OperIs(GT_STOREIND)); regMaskTP killMask = RBM_NONE; GenTree* data = tree->Data(); GCInfo::WriteBarrierForm writeBarrierForm = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, data); if (writeBarrierForm != GCInfo::WBF_NoBarrier) { if (compiler->codeGen->genUseOptimizedWriteBarriers(writeBarrierForm)) { // We can't determine the exact helper to be used at this point, because it depends on // the allocated register for the `data` operand. However, all the (x86) optimized // helpers have the same kill set: EDX. And note that currently, only x86 can return // `true` for genUseOptimizedWriteBarriers(). killMask = RBM_CALLEE_TRASH_NOGC; } else { // Figure out which helper we're going to use, and then get the kill set for that helper. CorInfoHelpFunc helper = compiler->codeGen->genWriteBarrierHelperForWriteBarrierForm(tree, writeBarrierForm); killMask = compiler->compHelperCallKillSet(helper); } } return killMask; } //------------------------------------------------------------------------ // getKillSetForShiftRotate: Determine the liveness kill set for a shift or rotate node. // // Arguments: // shiftNode - the shift or rotate node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(shiftNode->OperIsShiftOrRotate()); GenTree* shiftBy = shiftNode->gtGetOp2(); if (!shiftBy->isContained()) { killMask = RBM_RCX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForMul: Determine the liveness kill set for a multiply node. // // Arguments: // tree - the multiply node // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(mulNode->OperIsMul()); if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx())) { killMask = RBM_RAX | RBM_RDX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForModDiv: Determine the liveness kill set for a mod or div node. // // Arguments: // tree - the mod or div node as a GenTreeOp // // Return Value: a register mask of the registers killed // regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node) { regMaskTP killMask = RBM_NONE; #ifdef TARGET_XARCH assert(node->OperIs(GT_MOD, GT_DIV, GT_UMOD, GT_UDIV)); if (!varTypeIsFloating(node->TypeGet())) { // Both RAX and RDX are killed by the operation killMask = RBM_RAX | RBM_RDX; } #endif // TARGET_XARCH return killMask; } //------------------------------------------------------------------------ // getKillSetForCall: Determine the liveness kill set for a call node. 
//
// Arguments:
//    tree - the GenTreeCall node
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call)
{
    regMaskTP killMask = RBM_CALLEE_TRASH;
#ifdef TARGET_X86
    if (compiler->compFloatingPointUsed)
    {
        if (call->TypeGet() == TYP_DOUBLE)
        {
            needDoubleTmpForFPCall = true;
        }
        else if (call->TypeGet() == TYP_FLOAT)
        {
            needFloatTmpForFPCall = true;
        }
    }
#endif // TARGET_X86
    if (call->IsHelperCall())
    {
        CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
        killMask                 = compiler->compHelperCallKillSet(helpFunc);
    }

    // if there is no FP used, we can ignore the FP kills
    if (!compiler->compFloatingPointUsed)
    {
        killMask &= ~RBM_FLT_CALLEE_TRASH;
    }
#ifdef TARGET_ARM
    if (call->IsVirtualStub())
    {
        killMask |= compiler->virtualStubParamInfo->GetRegMask();
    }
#else  // !TARGET_ARM
    // Verify that the special virtual stub call registers are in the kill mask.
    // We don't just add them unconditionally to the killMask because for most architectures
    // they are already in the RBM_CALLEE_TRASH set,
    // and we don't want to introduce extra checks and calls in this hot function.
    assert(!call->IsVirtualStub() ||
           ((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask()));
#endif // !TARGET_ARM
    return killMask;
}

//------------------------------------------------------------------------
// getKillSetForBlockStore: Determine the liveness kill set for a block store node.
//
// Arguments:
//    tree - the block store node as a GenTreeBlk
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode)
{
    assert(blkNode->OperIsStore());
    regMaskTP killMask = RBM_NONE;

    if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp())
    {
        assert(blkNode->AsObj()->GetLayout()->HasGCPtr());
        killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
    }
    else
    {
        bool isCopyBlk = varTypeIsStruct(blkNode->Data());
        switch (blkNode->gtBlkOpKind)
        {
#ifndef TARGET_X86
            case GenTreeBlk::BlkOpKindHelper:
                if (isCopyBlk)
                {
                    killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY);
                }
                else
                {
                    killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
                }
                break;
#endif
#ifdef TARGET_XARCH
            case GenTreeBlk::BlkOpKindRepInstr:
                if (isCopyBlk)
                {
                    // rep movs kills RCX, RDI and RSI
                    killMask = RBM_RCX | RBM_RDI | RBM_RSI;
                }
                else
                {
                    // rep stos kills RCX and RDI.
                    // (Note that the Data() node, if not constant, will be assigned to
                    // RCX, but it's fine that this kills it, as the value is not available
                    // after this node in any case.)
                    killMask = RBM_RDI | RBM_RCX;
                }
                break;
#endif
            case GenTreeBlk::BlkOpKindUnroll:
            case GenTreeBlk::BlkOpKindInvalid:
                // for these 'gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
                break;
        }
    }
    return killMask;
}

#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// getKillSetForHWIntrinsic: Determine the liveness kill set for a GT_HWINTRINSIC node.
// Some intrinsics have implicit fixed-register operands (e.g. maskmovdqu's use of edi);
// the kill set for such nodes reflects those registers. If the intrinsic has no
// implicit register uses, the kill set will be RBM_NONE.
//
// Arguments:
//    node - the GT_HWINTRINSIC node
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node)
{
    regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
    switch (node->GetHWIntrinsicId())
    {
        case NI_SSE2_MaskMove:
            // maskmovdqu uses edi as the implicit address register.
            // Although it is set as the srcCandidate on the address, if there is also a fixed
            // assignment for the definition of the address, resolveConflictingDefAndUse() may
            // change the register assignment on the def or use of a tree temp (SDSU) when there
            // is a conflict, and the FixedRef on edi won't be sufficient to ensure that another
            // Interval will not be allocated there.
            // Issue #17674 tracks this.
            killMask = RBM_EDI;
            break;

        default:
            // Leave killMask as RBM_NONE
            break;
    }
#endif // TARGET_XARCH
    return killMask;
}
#endif // FEATURE_HW_INTRINSICS

//------------------------------------------------------------------------
// getKillSetForReturn: Determine the liveness kill set for a return node.
//
// Arguments:
//    NONE (this kill set is independent of the details of the specific return.)
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForReturn()
{
    return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE)
                                                : RBM_NONE;
}

//------------------------------------------------------------------------
// getKillSetForProfilerHook: Determine the liveness kill set for a profiler hook.
//
// Arguments:
//    NONE (this kill set is independent of the details of the specific node.)
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForProfilerHook()
{
    return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL)
                                                : RBM_NONE;
}

#ifdef DEBUG
//------------------------------------------------------------------------
// getKillSetForNode: Return the registers killed by the given tree node.
//
// Arguments:
//    tree - the tree for which the kill set is needed.
//
// Return Value:    a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForNode(GenTree* tree)
{
    regMaskTP killMask = RBM_NONE;
    switch (tree->OperGet())
    {
        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROL:
        case GT_ROR:
#ifdef TARGET_X86
        case GT_LSH_HI:
        case GT_RSH_LO:
#endif
            killMask = getKillSetForShiftRotate(tree->AsOp());
            break;

        case GT_MUL:
        case GT_MULHI:
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
        case GT_MUL_LONG:
#endif
            killMask = getKillSetForMul(tree->AsOp());
            break;

        case GT_MOD:
        case GT_DIV:
        case GT_UMOD:
        case GT_UDIV:
            killMask = getKillSetForModDiv(tree->AsOp());
            break;

        case GT_STORE_OBJ:
        case GT_STORE_BLK:
        case GT_STORE_DYN_BLK:
            killMask = getKillSetForBlockStore(tree->AsBlk());
            break;

        case GT_RETURNTRAP:
            killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
            break;

        case GT_CALL:
            killMask = getKillSetForCall(tree->AsCall());
            break;

        case GT_STOREIND:
            killMask = getKillSetForStoreInd(tree->AsStoreInd());
            break;

#if defined(PROFILING_SUPPORTED)
        // If this method requires a profiler ELT hook, then mark these nodes as killing
        // callee-trash registers (excluding RAX and XMM0). The reason for this is that
        // the profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for
        // more details.
case GT_RETURN: killMask = getKillSetForReturn(); break; case GT_PROF_HOOK: killMask = getKillSetForProfilerHook(); break; #endif // PROFILING_SUPPORTED #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: killMask = getKillSetForHWIntrinsic(tree->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: // for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE break; } return killMask; } #endif // DEBUG //------------------------------------------------------------------------ // buildKillPositionsForNode: // Given some tree node add refpositions for all the registers this node kills // // Arguments: // tree - the tree for which kill positions should be generated // currentLoc - the location at which the kills should be added // killMask - The mask of registers killed by this node // // Return Value: // true - kills were inserted // false - no kills were inserted // // Notes: // The return value is needed because if we have any kills, we need to make sure that // all defs are located AFTER the kills. On the other hand, if there aren't kills, // the multiple defs for a regPair are in different locations. // If we generate any kills, we will mark all currentLiveVars as being preferenced // to avoid the killed registers. This is somewhat conservative. // // This method can add kills even if killMask is RBM_NONE, if this tree is one of the // special cases that signals that we can't permit callee save registers to hold GC refs. bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask) { bool insertedKills = false; if (killMask != RBM_NONE) { addRefsForPhysRegMask(killMask, currentLoc, RefTypeKill, true); // TODO-CQ: It appears to be valuable for both fp and int registers to avoid killing the callee // save regs on infrequently executed paths. However, it results in a large number of asmDiffs, // many of which appear to be regressions (because there is more spill on the infrequently path), // but are not really because the frequent path becomes smaller. Validating these diffs will need // to be done before making this change. // Also note that we avoid setting callee-save preferences for floating point. This may need // revisiting, and note that it doesn't currently apply to SIMD types, only float or double. // if (!blockSequence[curBBSeqNum]->isRunRarely()) if (enregisterLocalVars) { VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType())) { if (!VarSetOps::IsMember(compiler, largeVectorCalleeSaveCandidateVars, varIndex)) { continue; } } else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (varTypeIsFloating(varDsc) && !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex)) { continue; } Interval* interval = getIntervalForLocalVar(varIndex); const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH)); if (isCallKill) { interval->preferCalleeSave = true; } // We are more conservative about allocating callee-saves registers to write-thru vars, since // a call only requires reloading after (not spilling before). So we record (above) the fact // that we'd prefer a callee-save register, but we don't update the preferences at this point. // See the "heuristics for writeThru intervals" in 'buildIntervals()'. 
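                // (Editor's note:) equivalently, we skip the preference update only when the
                // interval is write-thru AND the kill comes from a call; any other kill still
                // steers the variable away from the killed registers.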
if (!interval->isWriteThru || !isCallKill) { regMaskTP newPreferences = allRegs(interval->registerType) & (~killMask); if (newPreferences != RBM_NONE) { interval->updateRegisterPreferences(newPreferences); } else { // If there are no callee-saved registers, the call could kill all the registers. // This is a valid state, so in that case assert should not trigger. The RA will spill in order // to free a register later. assert(compiler->opts.compDbgEnC || (calleeSaveRegs(varDsc->lvType)) == RBM_NONE); } } } } insertedKills = true; } if (compiler->killGCRefs(tree)) { RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS)); insertedKills = true; } return insertedKills; } //------------------------------------------------------------------------ // LinearScan::isCandidateMultiRegLclVar: Check whether a MultiReg node should // remain a candidate MultiReg // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest // // Return Value: // true iff it remains a MultiReg lclVar. // // Notes: // When identifying candidates, the register allocator will only retain // promoted fields of a multi-reg local as candidates if all of its fields // are candidates. This is because of the added complexity of dealing with a // def or use of a multi-reg lclVar when only some of the fields have liveness // info. // At the time we determine whether a multi-reg lclVar can still be handled // as such, we've already completed Lowering, so during the build phase of // LSRA we have to reset the GTF_VAR_MULTIREG flag if necessary as we visit // each node. // bool LinearScan::isCandidateMultiRegLclVar(GenTreeLclVar* lclNode) { assert(compiler->lvaEnregMultiRegVars && lclNode->IsMultiReg()); LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode); assert(varDsc->lvPromoted); bool isMultiReg = (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT); if (!isMultiReg) { lclNode->ClearMultiReg(); } #ifdef DEBUG for (unsigned int i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); assert(isCandidateVar(fieldVarDsc) == isMultiReg); } #endif // DEBUG return isMultiReg; } //------------------------------------------------------------------------ // checkContainedOrCandidateLclVar: Check whether a GT_LCL_VAR node is a // candidate or contained. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest // // Return Value: // true if the node remains a candidate or is contained // false otherwise (i.e. if it will define a register) // // Notes: // We handle candidate variables differently from non-candidate ones. // If it is a candidate, we will simply add a use of it at its parent/consumer. // Otherwise, for a use we need to actually add the appropriate references for loading // or storing the variable. // // A candidate lclVar won't actually get used until the appropriate ancestor node // is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument // to a call or an orphaned dead node. // // Also, because we do containment analysis before we redo dataflow and identify register // candidates, the containment analysis only uses !lvDoNotEnregister to estimate register // candidates. // If there is a lclVar that is estimated during Lowering to be register candidate but turns // out not to be, if a use was marked regOptional it should now be marked contained instead. 
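//    (Editor's note:) that regOptional-to-contained conversion is exactly what the
//    'makeContained' path in the function body below performs for non-candidate locals.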
//
bool LinearScan::checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode)
{
    bool isCandidate;
    bool makeContained = false;
    // We shouldn't be calling this if this node was already contained.
    assert(!lclNode->isContained());
    // If we have a multireg local, verify that its fields are still register candidates.
    if (lclNode->IsMultiReg())
    {
        // Multi-reg uses must support containment, but if we have an actual multi-reg local
        // we don't want it to be RegOptional in fixed-use cases, so that we can ensure proper
        // liveness modeling (e.g. if one field is in a register required by another field, in
        // a RegOptional case we won't handle the conflict properly if we decide not to allocate).
        isCandidate = isCandidateMultiRegLclVar(lclNode);
        if (isCandidate)
        {
            assert(!lclNode->IsRegOptional());
        }
        else
        {
            makeContained = true;
        }
    }
    else
    {
        isCandidate   = compiler->lvaGetDesc(lclNode)->lvLRACandidate;
        makeContained = !isCandidate && lclNode->IsRegOptional();
    }
    if (makeContained)
    {
        lclNode->ClearRegOptional();
        lclNode->SetContained();
        return true;
    }
    return isCandidate;
}

//----------------------------------------------------------------------------
// defineNewInternalTemp: Defines a ref position for an internal temp.
//
// Arguments:
//     tree    - GenTree node requiring an internal register
//     regType - Register type
//     regMask - register mask of candidates for temp
//
RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP regMask)
{
    Interval* current   = newInterval(regType);
    current->isInternal = true;
    RefPosition* newDef = newRefPosition(current, currentLoc, RefTypeDef, tree, regMask, 0);
    assert(internalCount < MaxInternalCount);
    internalDefs[internalCount++] = newDef;
    return newDef;
}

//------------------------------------------------------------------------
// buildInternalIntRegisterDefForNode - Create an Interval for an internal int register, and a def RefPosition
//
// Arguments:
//   tree          - GenTree node that needs internal registers
//   internalCands - The mask of valid registers
//
// Returns:
//   The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
    // The candidate set should contain only integer registers.
    assert((internalCands & ~allRegs(TYP_INT)) == RBM_NONE);

    RefPosition* defRefPosition = defineNewInternalTemp(tree, IntRegisterType, internalCands);
    return defRefPosition;
}

//------------------------------------------------------------------------
// buildInternalFloatRegisterDefForNode - Create an Interval for an internal fp register, and a def RefPosition
//
// Arguments:
//   tree          - GenTree node that needs internal registers
//   internalCands - The mask of valid registers
//
// Returns:
//   The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
    // The candidate set should contain only float registers.
    assert((internalCands & ~allRegs(TYP_FLOAT)) == RBM_NONE);

    RefPosition* defRefPosition = defineNewInternalTemp(tree, FloatRegisterType, internalCands);
    return defRefPosition;
}

//------------------------------------------------------------------------
// buildInternalRegisterUses - adds use positions for internal
//                             registers required for tree node.
//
// Notes:
//    During the BuildNode process, calls to buildInternalIntRegisterDefForNode and
//    buildInternalFloatRegisterDefForNode put new RefPositions in the 'internalDefs'
//    array, and increment 'internalCount'. This method must be called to add corresponding
//    uses. It then resets the 'internalCount' for the handling of the next node.
//
//    If the internal registers must differ from the target register, 'setInternalRegsDelayFree'
//    must be set to true, so that the uses may be marked 'delayRegFree'.
//    Note that if a node has both float and int temps, generally the target will either be
//    int *or* float, and it is not really necessary to set this on the other type, but it does
//    no harm as it won't restrict the register selection.
//
void LinearScan::buildInternalRegisterUses()
{
    assert(internalCount <= MaxInternalCount);
    for (int i = 0; i < internalCount; i++)
    {
        RefPosition* def  = internalDefs[i];
        regMaskTP    mask = def->registerAssignment;
        RefPosition* use  = newRefPosition(def->getInterval(), currentLoc, RefTypeUse, def->treeNode, mask, 0);
        if (setInternalRegsDelayFree)
        {
            use->delayRegFree = true;
            pendingDelayFree  = true;
        }
    }
    // internalCount = 0;
}

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// makeUpperVectorInterval - Create an Interval for saving and restoring
//                           the upper half of a large vector.
//
// Arguments:
//    varIndex - The tracked index for a large vector lclVar.
//
void LinearScan::makeUpperVectorInterval(unsigned varIndex)
{
    Interval* lclVarInterval = getIntervalForLocalVar(varIndex);
    assert(Compiler::varTypeNeedsPartialCalleeSave(lclVarInterval->registerType));
    Interval* newInt        = newInterval(LargeVectorSaveType);
    newInt->relatedInterval = lclVarInterval;
    newInt->isUpperVector   = true;
}

//------------------------------------------------------------------------
// getUpperVectorInterval - Get the Interval for saving and restoring
//                          the upper half of a large vector.
//
// Arguments:
//    varIndex - The tracked index for a large vector lclVar.
//
Interval* LinearScan::getUpperVectorInterval(unsigned varIndex)
{
    // TODO-Throughput: Consider creating a map from varIndex to upperVector interval.
    for (Interval& interval : intervals)
    {
        if (interval.isLocalVar)
        {
            continue;
        }
        noway_assert(interval.isUpperVector);
        if (interval.relatedInterval->getVarIndex(compiler) == varIndex)
        {
            return &interval;
        }
    }
    unreached();
}

//------------------------------------------------------------------------
// buildUpperVectorSaveRefPositions - Create special RefPositions for saving
//                                    the upper half of a set of large vectors.
//
// Arguments:
//    tree             - The current node being handled
//    currentLoc       - The location of the current node
//    fpCalleeKillSet  - The set of registers killed by this node.
//
// Notes: This is called by BuildDefsWithKills for any node that kills registers in the
//        RBM_FLT_CALLEE_TRASH set. We actually need to find any calls that kill the upper-half
//        of the callee-save vector registers.
//        But we will use as a proxy any node that kills floating point registers.
//        (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
//
void LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet)
{
    if ((tree != nullptr) && tree->IsCall())
    {
        if (tree->AsCall()->IsNoReturn() || compiler->fgIsThrow(tree))
        {
            // No point in having vector save/restore if the call will not return.
return; } } if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, largeVectorVars)) { // We assume that the kill set includes at least some callee-trash registers, but // that it doesn't include any callee-save registers. assert((fpCalleeKillSet & RBM_FLT_CALLEE_TRASH) != RBM_NONE); assert((fpCalleeKillSet & RBM_FLT_CALLEE_SAVED) == RBM_NONE); // We only need to save the upper half of any large vector vars that are currently live. VARSET_TP liveLargeVectors(VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars)); VarSetOps::Iter iter(compiler, liveLargeVectors); unsigned varIndex = 0; bool isThrowBlock = compiler->compCurBB->KindIs(BBJ_THROW); while (iter.NextElem(&varIndex)) { Interval* varInterval = getIntervalForLocalVar(varIndex); if (!varInterval->isPartiallySpilled) { Interval* upperVectorInterval = getUpperVectorInterval(varIndex); RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED); varInterval->isPartiallySpilled = true; pos->skipSaveRestore = isThrowBlock; #ifdef TARGET_XARCH pos->regOptional = true; #endif } } } // For any non-lclVar intervals that are live at this point (i.e. in the DefList), we will also create // a RefTypeUpperVectorSave. For now these will all be spilled at this point, as we don't currently // have a mechanism to communicate any non-lclVar intervals that need to be restored. // TODO-CQ: We could consider adding such a mechanism, but it's unclear whether this rare // case of a large vector temp live across a call is worth the added complexity. for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end; listNode = listNode->Next()) { const GenTree* defNode = listNode->treeNode; var_types regType = defNode->TypeGet(); if (regType == TYP_STRUCT) { assert(defNode->OperIs(GT_LCL_VAR, GT_CALL)); if (defNode->OperIs(GT_LCL_VAR)) { const GenTreeLclVar* lcl = defNode->AsLclVar(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); regType = varDsc->GetRegisterType(); } else { const GenTreeCall* call = defNode->AsCall(); const CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; regType = compiler->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == Compiler::SPK_ByValueAsHfa) { regType = compiler->GetHfaType(retClsHnd); } #if defined(TARGET_ARM64) else if (howToReturnStruct == Compiler::SPK_ByValue) { // TODO-Cleanup: add a new Compiler::SPK for this case. // This is the case when 16-byte struct is returned as [x0, x1]. // We don't need a partial callee save. regType = TYP_LONG; } #endif // TARGET_ARM64 } assert((regType != TYP_STRUCT) && (regType != TYP_UNDEF)); } if (Compiler::varTypeNeedsPartialCalleeSave(regType)) { // In the rare case where such an interval is live across nested calls, we don't need to insert another. if (listNode->ref->getInterval()->recentRefPosition->refType != RefTypeUpperVectorSave) { RefPosition* pos = newRefPosition(listNode->ref->getInterval(), currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED); } } } } //------------------------------------------------------------------------ // buildUpperVectorRestoreRefPosition - Create a RefPosition for restoring // the upper half of a large vector. // // Arguments: // lclVarInterval - A lclVarInterval that is live at 'currentLoc' // currentLoc - The current location for which we're building RefPositions // node - The node, if any, that the restore would be inserted before. 
// If null, the restore will be inserted at the end of the block. // isUse - If the refPosition that is about to be created represents a use or not. // - If not, it would be the one at the end of the block. // void LinearScan::buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node, bool isUse) { if (lclVarInterval->isPartiallySpilled) { unsigned varIndex = lclVarInterval->getVarIndex(compiler); Interval* upperVectorInterval = getUpperVectorInterval(varIndex); RefPosition* savePos = upperVectorInterval->recentRefPosition; RefPosition* restorePos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorRestore, node, RBM_NONE); lclVarInterval->isPartiallySpilled = false; if (isUse) { // If there was a use of the restore before end of the block restore, // then it is needed and cannot be eliminated savePos->skipSaveRestore = false; } else { // otherwise, just do the whatever was decided for save position restorePos->skipSaveRestore = savePos->skipSaveRestore; } #ifdef TARGET_XARCH restorePos->regOptional = true; #endif } } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #ifdef DEBUG //------------------------------------------------------------------------ // ComputeOperandDstCount: computes the number of registers defined by a // node. // // For most nodes, this is simple: // - Nodes that do not produce values (e.g. stores and other void-typed // nodes) and nodes that immediately use the registers they define // produce no registers // - Nodes that are marked as defining N registers define N registers. // // For contained nodes, however, things are more complicated: for purposes // of bookkeeping, a contained node is treated as producing the transitive // closure of the registers produced by its sources. // // Arguments: // operand - The operand for which to compute a register count. // // Returns: // The number of registers defined by `operand`. // int LinearScan::ComputeOperandDstCount(GenTree* operand) { // GT_ARGPLACE is the only non-LIR node that is currently in the trees at this stage, though // note that it is not in the linear order. if (operand->OperIs(GT_ARGPLACE)) { return 0; } if (operand->isContained()) { int dstCount = 0; for (GenTree* op : operand->Operands()) { dstCount += ComputeOperandDstCount(op); } return dstCount; } if (operand->IsUnusedValue()) { // Operands that define an unused value do not produce any registers. return 0; } if (operand->IsValue()) { // Operands that are values and are not contained consume all of their operands // and produce one or more registers. return operand->GetRegisterDstCount(compiler); } else { // This must be one of the operand types that are neither contained nor produce a value. // Stores and void-typed operands may be encountered when processing call nodes, which contain // pointers to argument setup stores. assert(operand->OperIsStore() || operand->OperIsBlkOp() || operand->OperIsPutArgStk() || operand->OperIsCompare() || operand->OperIs(GT_CMP) || operand->TypeGet() == TYP_VOID); return 0; } } //------------------------------------------------------------------------ // ComputeAvailableSrcCount: computes the number of registers available as // sources for a node. // // This is simply the sum of the number of registers produced by each // operand to the node. // // Arguments: // node - The node for which to compute a source count. // // Return Value: // The number of registers available as sources for `node`. 
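//
// Notes:
//    (Editor's worked example, with hypothetical operands:) for ADD(t1, contained CNS_INT),
//    the non-contained value 't1' contributes one register, while the contained constant
//    contributes the sum over its own (empty) operand list, i.e. zero, so the available
//    source count for the ADD is 1.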
// int LinearScan::ComputeAvailableSrcCount(GenTree* node) { int numSources = 0; for (GenTree* operand : node->Operands()) { numSources += ComputeOperandDstCount(operand); } return numSources; } #endif // DEBUG //------------------------------------------------------------------------ // buildRefPositionsForNode: The main entry point for building the RefPositions // and "tree temp" Intervals for a given node. // // Arguments: // tree - The node for which we are building RefPositions // currentLoc - The LsraLocation of the given node // void LinearScan::buildRefPositionsForNode(GenTree* tree, LsraLocation currentLoc) { // The LIR traversal doesn't visit GT_ARGPLACE nodes. // GT_CLS_VAR nodes should have been eliminated by rationalizer. assert(tree->OperGet() != GT_ARGPLACE); assert(tree->OperGet() != GT_CLS_VAR); // The set of internal temporary registers used by this node are stored in the // gtRsvdRegs register mask. Clear it out. tree->gtRsvdRegs = RBM_NONE; #ifdef DEBUG if (VERBOSE) { dumpDefList(); compiler->gtDispTree(tree, nullptr, nullptr, true); } #endif // DEBUG if (tree->isContained()) { #ifdef TARGET_XARCH // On XArch we can have contained candidate lclVars if they are part of a RMW // address computation. In this case we need to check whether it is a last use. if (tree->IsLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0)) { LclVarDsc* const varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon()); if (isCandidateVar(varDsc)) { assert(varDsc->lvTracked); unsigned varIndex = varDsc->lvVarIndex; VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex); } } #else // TARGET_XARCH assert(!isCandidateLocalRef(tree)); #endif // TARGET_XARCH JITDUMP("Contained\n"); return; } #ifdef DEBUG // If we are constraining the registers for allocation, we will modify all the RefPositions // we've built for this node after we've created them. In order to do that, we'll remember // the last RefPosition prior to those created for this node. RefPositionIterator refPositionMark = refPositions.backPosition(); int oldDefListCount = defList.Count(); #endif // DEBUG int consume = BuildNode(tree); #ifdef DEBUG int newDefListCount = defList.Count(); // Currently produce is unused, but need to strengthen an assert to check if produce is // as expected. See https://github.com/dotnet/runtime/issues/8678 int produce = newDefListCount - oldDefListCount; assert((consume == 0) || (ComputeAvailableSrcCount(tree) == consume)); // If we are constraining registers, modify all the RefPositions we've just built to specify the // minimum reg count required. if ((getStressLimitRegs() != LSRA_LIMIT_NONE) || (getSelectionHeuristics() != LSRA_SELECT_DEFAULT)) { // The number of registers required for a tree node is the sum of // { RefTypeUses } + { RefTypeDef for the node itself } + specialPutArgCount // This is the minimum set of registers that needs to be ensured in the candidate set of ref positions created. // // First, we count them. 
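        // As an illustrative sketch (the node shape here is hypothetical): a simple
        // binary node with two register uses and a def of its own would yield
        // minRegCount == 3 - two RefTypeUse positions plus one non-internal RefTypeDef.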
        unsigned            minRegCount = 0;
        RefPositionIterator iter        = refPositionMark;
        for (iter++; iter != refPositions.end(); iter++)
        {
            RefPosition* newRefPosition = &(*iter);
            if (newRefPosition->isIntervalRef())
            {
                if ((newRefPosition->refType == RefTypeUse) ||
                    ((newRefPosition->refType == RefTypeDef) && !newRefPosition->getInterval()->isInternal))
                {
                    minRegCount++;
                }
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                else if (newRefPosition->refType == RefTypeUpperVectorSave)
                {
                    minRegCount++;
                }
#endif
                if (newRefPosition->getInterval()->isSpecialPutArg)
                {
                    minRegCount++;
                }
            }
        }

        if (tree->OperIsPutArgSplit())
        {
            // While we have attempted to account for any "specialPutArg" defs above, we're only looking at
            // RefPositions created for this node. We must be defining at least one register in the PutArgSplit,
            // so conservatively add one less than the maximum number of register args to 'minRegCount'.
            minRegCount += MAX_REG_ARG - 1;
        }
        for (refPositionMark++; refPositionMark != refPositions.end(); refPositionMark++)
        {
            RefPosition* newRefPosition    = &(*refPositionMark);
            unsigned     minRegCountForRef = minRegCount;
            if (RefTypeIsUse(newRefPosition->refType) && newRefPosition->delayRegFree)
            {
                // If delayRegFree, then Use will interfere with the destination of the consuming node.
                // Therefore, we also need to add the kill set of the consuming node to minRegCount.
                //
                // For example consider the following IR on x86, where v01 and v02
                // are method args coming in ecx and edx respectively.
                //   GT_DIV(v01, v02)
                //
                // For GT_DIV, the minRegCount will be 3 without adding kill set of GT_DIV node.
                //
                // Assume further JitStressRegs=2, which would constrain candidates to callee trashable
                // regs { eax, ecx, edx } on use positions of v01 and v02. LSRA allocates ecx for v01.
                // The use position of v02 cannot be allocated a reg since it is marked delay-reg free and
                // {eax,edx} are getting killed before the def of GT_DIV. For this reason, minRegCount for
                // the use position of v02 also needs to take into account the kill set of its consuming node.
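                //
                // Continuing that GT_DIV example with illustrative numbers: minRegCount is 3,
                // and if the kill set for the node is {eax, edx} then genCountBits(killMask)
                // adds 2 below, so the delay-free use of v02 ends up with a
                // minRegCandidateCount of 5.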
regMaskTP killMask = getKillSetForNode(tree); if (killMask != RBM_NONE) { minRegCountForRef += genCountBits(killMask); } } else if ((newRefPosition->refType) == RefTypeDef && (newRefPosition->getInterval()->isSpecialPutArg)) { minRegCountForRef++; } newRefPosition->minRegCandidateCount = minRegCountForRef; if (newRefPosition->IsActualRef() && doReverseCallerCallee()) { Interval* interval = newRefPosition->getInterval(); regMaskTP oldAssignment = newRefPosition->registerAssignment; regMaskTP calleeSaveMask = calleeSaveRegs(interval->registerType); newRefPosition->registerAssignment = getConstrainedRegMask(oldAssignment, calleeSaveMask, minRegCountForRef); if ((newRefPosition->registerAssignment != oldAssignment) && (newRefPosition->refType == RefTypeUse) && !interval->isLocalVar) { checkConflictingDefUse(newRefPosition); } } } } #endif // DEBUG JITDUMP("\n"); } static const regNumber lsraRegOrder[] = {REG_VAR_ORDER}; const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder); static const regNumber lsraRegOrderFlt[] = {REG_VAR_ORDER_FLT}; const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt); //------------------------------------------------------------------------ // buildPhysRegRecords: Make an interval for each physical register // void LinearScan::buildPhysRegRecords() { for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg)) { RegRecord* curr = &physRegs[reg]; curr->init(reg); } for (unsigned int i = 0; i < lsraRegOrderSize; i++) { regNumber reg = lsraRegOrder[i]; RegRecord* curr = &physRegs[reg]; curr->regOrder = (unsigned char)i; } for (unsigned int i = 0; i < lsraRegOrderFltSize; i++) { regNumber reg = lsraRegOrderFlt[i]; RegRecord* curr = &physRegs[reg]; curr->regOrder = (unsigned char)i; } } //------------------------------------------------------------------------ // insertZeroInitRefPositions: Handle lclVars that are live-in to the first block // // Notes: // Prior to calling this method, 'currentLiveVars' must be set to the set of register // candidate variables that are liveIn to the first block. // For each register candidate that is live-in to the first block: // - If it is a GC ref, or if compInitMem is set, a ZeroInit RefPosition will be created. // - Otherwise, it will be marked as spilled, since it will not be assigned a register // on entry and will be loaded from memory on the undefined path. // Note that, when the compInitMem option is not set, we may encounter these on // paths that are protected by the same condition as an earlier def. However, since // we don't do the analysis to determine this - and couldn't rely on always identifying // such cases even if we tried - we must conservatively treat the undefined path as // being possible. This is a relatively rare case, so the introduced conservatism is // not expected to warrant the analysis required to determine the best placement of // an initialization. 
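//    As a hedged example of this policy (the variable numbers are hypothetical):
//    if V01 is a live-in GC ref it gets a RefTypeZeroInit at MinLocation, while a
//    live-in TYP_INT V02 with compInitMem unset is simply marked as spilled and
//    will be loaded from memory when its first use is encountered.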
// void LinearScan::insertZeroInitRefPositions() { assert(enregisterLocalVars); #ifdef DEBUG VARSET_TP expectedLiveVars(VarSetOps::Intersection(compiler, registerCandidateVars, compiler->fgFirstBB->bbLiveIn)); assert(VarSetOps::Equal(compiler, currentLiveVars, expectedLiveVars)); #endif // DEBUG // insert defs for this, then a block boundary VarSetOps::Iter iter(compiler, currentLiveVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!varDsc->lvIsParam && isCandidateVar(varDsc)) { JITDUMP("V%02u was live in to first block:", compiler->lvaTrackedIndexToLclNum(varIndex)); Interval* interval = getIntervalForLocalVar(varIndex); if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())) { varDsc->lvMustInit = true; // OSR will handle init of locals and promoted fields thereof if (compiler->lvaIsOSRLocal(compiler->lvaTrackedIndexToLclNum(varIndex))) { JITDUMP(" will be initialized by OSR\n"); // setIntervalAsSpilled(interval); varDsc->lvMustInit = false; } JITDUMP(" creating ZeroInit\n"); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */, allRegs(interval->registerType)); pos->setRegOptional(true); } else { setIntervalAsSpilled(interval); JITDUMP(" marking as spilled\n"); } } } // We must also insert zero-inits for any finallyVars if they are refs or if compInitMem is true. if (compiler->lvaEnregEHVars) { VarSetOps::Iter iter(compiler, finallyVars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!varDsc->lvIsParam && isCandidateVar(varDsc)) { JITDUMP("V%02u is a finally var:", compiler->lvaTrackedIndexToLclNum(varIndex)); Interval* interval = getIntervalForLocalVar(varIndex); if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())) { if (interval->recentRefPosition == nullptr) { JITDUMP(" creating ZeroInit\n"); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */, allRegs(interval->registerType)); pos->setRegOptional(true); varDsc->lvMustInit = true; } else { // We must only generate one entry RefPosition for each Interval. Since this is not // a parameter, it can't be RefTypeParamDef, so it must be RefTypeZeroInit, which // we must have generated for the live-in case above. assert(interval->recentRefPosition->refType == RefTypeZeroInit); JITDUMP(" already ZeroInited\n"); } } } } } } #if defined(UNIX_AMD64_ABI) //------------------------------------------------------------------------ // unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems. // // Arguments: // argDsc - the LclVarDsc for the argument of interest // // Notes: // See Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc) in regalloc.cpp // for how state for argument is updated for unix non-structs and Windows AMD64 structs. 
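//    Hypothetical example: a 16-byte struct { long l; double d; } passed by value
//    is classified as one INTEGER and one SSE eightbyte, so GetArgReg() might be
//    RDI while GetOtherArgReg() is XMM0; both register masks are then added to
//    the corresponding rsCalleeRegArgMaskLiveIn below.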
// void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc) { assert(varTypeIsStruct(argDsc)); RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; if ((argDsc->GetArgReg() != REG_STK) && (argDsc->GetArgReg() != REG_NA)) { if (genRegMask(argDsc->GetArgReg()) & (RBM_ALLFLOAT)) { assert(genRegMask(argDsc->GetArgReg()) & (RBM_FLTARG_REGS)); floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg()); } else { assert(genRegMask(argDsc->GetArgReg()) & (RBM_ARG_REGS)); intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg()); } } if ((argDsc->GetOtherArgReg() != REG_STK) && (argDsc->GetOtherArgReg() != REG_NA)) { if (genRegMask(argDsc->GetOtherArgReg()) & (RBM_ALLFLOAT)) { assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_FLTARG_REGS)); floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg()); } else { assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_ARG_REGS)); intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg()); } } } #endif // defined(UNIX_AMD64_ABI) //------------------------------------------------------------------------ // updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate // regState (either compiler->intRegState or compiler->floatRegState), // with the lvArgReg on "argDsc" // // Arguments: // argDsc - the argument for which the state is to be updated. // // Return Value: None // // Assumptions: // The argument is live on entry to the function // (or is untracked and therefore assumed live) // // Notes: // This relies on a method in regAlloc.cpp that is shared between LSRA // and regAlloc. It is further abstracted here because regState is updated // separately for tracked and untracked variables in LSRA. // void LinearScan::updateRegStateForArg(LclVarDsc* argDsc) { #if defined(UNIX_AMD64_ABI) // For System V AMD64 calls the argDsc can have 2 registers (for structs.) // Handle them here. if (varTypeIsStruct(argDsc)) { unixAmd64UpdateRegStateForArg(argDsc); } else #endif // defined(UNIX_AMD64_ABI) { RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; bool isFloat = emitter::isFloatReg(argDsc->GetArgReg()); if (argDsc->lvIsHfaRegArg()) { isFloat = true; } if (isFloat) { JITDUMP("Float arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg())); compiler->raUpdateRegStateForArg(floatRegState, argDsc); } else { JITDUMP("Int arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg())); #if FEATURE_MULTIREG_ARGS if (argDsc->GetOtherArgReg() != REG_NA) { JITDUMP("(second half) in reg %s\n", getRegName(argDsc->GetOtherArgReg())); } #endif // FEATURE_MULTIREG_ARGS compiler->raUpdateRegStateForArg(intRegState, argDsc); } } } //------------------------------------------------------------------------ // buildIntervals: The main entry point for building the data structures over // which we will do register allocation. 
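//
// In outline (a simplified sketch of the flow below): the physical register
// records are built first, then RefTypeParamDef positions for tracked parameters
// (assigned to the nonexistent BB0), and then each block in the allocation order
// contributes a RefTypeBB boundary plus the RefPositions built for its LIR nodes.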
// void LinearScan::buildIntervals() { BasicBlock* block; JITDUMP("\nbuildIntervals ========\n"); // Build (empty) records for all of the physical registers buildPhysRegRecords(); #ifdef DEBUG if (VERBOSE) { printf("\n-----------------\n"); printf("LIVENESS:\n"); printf("-----------------\n"); for (BasicBlock* const block : compiler->Blocks()) { printf(FMT_BB " use def in out\n", block->bbNum); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n"); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n"); dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n"); } } #endif // DEBUG #if DOUBLE_ALIGN // We will determine whether we should double align the frame during // identifyCandidates(), but we initially assume that we will not. doDoubleAlign = false; #endif identifyCandidates(); // Figure out if we're going to use a frame pointer. We need to do this before building // the ref positions, because those objects will embed the frame register in various register masks // if the frame pointer is not reserved. If we decide to have a frame pointer, setFrameType() will // remove the frame pointer from the masks. setFrameType(); DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_PRE)); // second part: JITDUMP("\nbuildIntervals second part ========\n"); currentLoc = 0; // TODO-Cleanup: This duplicates prior behavior where entry (ParamDef) RefPositions were // being assigned the bbNum of the last block traversed in the 2nd phase of Lowering. // Previously, the block sequencing was done for the (formerly separate) Build pass, // and the curBBNum was left as the last block sequenced. This block was then used to set the // weight for the entry (ParamDef) RefPositions. It would be logical to set this to the // normalized entry weight (compiler->fgCalledCount), but that results in a net regression. if (!blockSequencingDone) { setBlockSequence(); } // Next, create ParamDef RefPositions for all the tracked parameters, in order of their varIndex. // Assign these RefPositions to the (nonexistent) BB0. curBBNum = 0; RegState* intRegState = &compiler->codeGen->intRegState; RegState* floatRegState = &compiler->codeGen->floatRegState; intRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE; floatRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE; for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { LclVarDsc* argDsc = compiler->lvaGetDescByTrackedIndex(varIndex); if (!argDsc->lvIsParam) { continue; } // Only reserve a register if the argument is actually used. // Is it dead on entry? If compJmpOpUsed is true, then the arguments // have to be kept alive, so we have to consider it as live on entry. // Use lvRefCnt instead of checking bbLiveIn because if it's volatile we // won't have done dataflow on it, but it needs to be marked as live-in so // it will get saved in the prolog. 
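        // For example (a hypothetical signature): in 'int Foo(int a, int b)' where
        // 'b' is never referenced, 'b' is skipped here - no ParamDef RefPosition is
        // created for it - unless compJmpOpUsed or compDbgCode forces all arguments
        // to be treated as live on entry.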
if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.compDbgCode) { continue; } if (argDsc->lvIsRegArg) { updateRegStateForArg(argDsc); } if (isCandidateVar(argDsc)) { Interval* interval = getIntervalForLocalVar(varIndex); const var_types regType = argDsc->GetRegisterType(); regMaskTP mask = allRegs(regType); if (argDsc->lvIsRegArg) { // Set this interval as currently assigned to that register regNumber inArgReg = argDsc->GetArgReg(); assert(inArgReg < REG_COUNT); mask = genRegMask(inArgReg); assignPhysReg(inArgReg, interval); INDEBUG(registersToDump |= getRegMask(inArgReg, interval->registerType)); } RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask); pos->setRegOptional(true); } else if (varTypeIsStruct(argDsc->lvType)) { for (unsigned fieldVarNum = argDsc->lvFieldLclStart; fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum) { const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum); if (fieldVarDsc->lvLRACandidate) { assert(fieldVarDsc->lvTracked); Interval* interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex); RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, allRegs(TypeGet(fieldVarDsc))); pos->setRegOptional(true); } } } else { // We can overwrite the register (i.e. codegen saves it on entry) assert(argDsc->lvRefCnt() == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister || !argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode)); } } // Now set up the reg state for the non-tracked args // (We do this here because we want to generate the ParamDef RefPositions in tracked // order, so that loop doesn't hit the non-tracked args) for (unsigned argNum = 0; argNum < compiler->info.compArgsCount; argNum++) { LclVarDsc* argDsc = compiler->lvaGetDesc(argNum); if (argDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = argDsc->lvFieldLclStart; fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum); noway_assert(fieldVarDsc->lvIsParam); if (!fieldVarDsc->lvTracked && fieldVarDsc->lvIsRegArg) { updateRegStateForArg(fieldVarDsc); } } } else { noway_assert(argDsc->lvIsParam); if (!argDsc->lvTracked && argDsc->lvIsRegArg) { updateRegStateForArg(argDsc); } } } // If there is a secret stub param, it is also live in if (compiler->info.compPublishStubParam) { intRegState->rsCalleeRegArgMaskLiveIn |= RBM_SECRET_STUB_PARAM; } BasicBlock* predBlock = nullptr; BasicBlock* prevBlock = nullptr; // Initialize currentLiveVars to the empty set. We will set it to the current // live-in at the entry to each block (this will include the incoming args on // the first block). 
    VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::MakeEmpty(compiler));

    for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
    {
        JITDUMP("\nNEW BLOCK " FMT_BB "\n", block->bbNum);
        compiler->compCurBB = block;

        bool predBlockIsAllocated = false;
        predBlock                 = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
        if (predBlock != nullptr)
        {
            JITDUMP("\n\nSetting " FMT_BB " as the predecessor for determining incoming variable registers of " FMT_BB
                    "\n",
                    predBlock->bbNum, block->bbNum);
            assert(predBlock->bbNum <= bbNumMaxBeforeResolution);
            blockInfo[block->bbNum].predBBNum = predBlock->bbNum;
        }

        if (enregisterLocalVars)
        {
            VarSetOps::AssignNoCopy(compiler, currentLiveVars,
                                    VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveIn));

            if (block == compiler->fgFirstBB)
            {
                insertZeroInitRefPositions();
                // The first real location is at 1; 0 is for the entry.
                currentLoc = 1;
            }

            // For blocks that don't have EHBoundaryIn, we need DummyDefs for cases where "predBlock" isn't
            // really a predecessor.
            // Note that it's possible to have uses of uninitialized variables, in which case even the first
            // block may require DummyDefs, which we are not currently adding - this means that these variables
            // will always be considered to be in memory on entry (and reloaded when the use is encountered).
            // TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
            // variables (which may actually be initialized along the dynamically executed paths, but not
            // on all static paths), we wind up with excessive liveranges for some of these variables.

            if (!blockInfo[block->bbNum].hasEHBoundaryIn)
            {
                // Any lclVars live-in on a non-EH boundary edge are resolution candidates.
                VarSetOps::UnionD(compiler, resolutionCandidateVars, currentLiveVars);

                if (block != compiler->fgFirstBB)
                {
                    VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
                    if (predBlock != nullptr)
                    {
                        // Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
                        VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
                    }
                    // Don't create dummy defs for EH vars; we'll load them from the stack as/when needed.
                    VarSetOps::DiffD(compiler, newLiveIn, exceptVars);

                    // Create dummy def RefPositions

                    if (!VarSetOps::IsEmpty(compiler, newLiveIn))
                    {
                        // If we are using locations from a predecessor, we should never require DummyDefs.
                        assert(!predBlockIsAllocated);

                        JITDUMP("Creating dummy definitions\n");
                        VarSetOps::Iter iter(compiler, newLiveIn);
                        unsigned        varIndex = 0;
                        while (iter.NextElem(&varIndex))
                        {
                            // Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
                            LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
                            assert(isCandidateVar(varDsc));
                            Interval*    interval = getIntervalForLocalVar(varIndex);
                            RefPosition* pos      = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
                                                              allRegs(interval->registerType));
                            pos->setRegOptional(true);
                        }
                        JITDUMP("Finished creating dummy definitions\n\n");
                    }
                }
            }
        }

        // Add a dummy RefPosition to mark the block boundary.
        // Note that we do this AFTER adding the exposed uses above, because the
        // register positions for those exposed uses need to be recorded at
        // this point.
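        // To illustrate the numbering scheme (the values are illustrative): a block
        // whose boundary RefTypeBB lands at location L has its first node at L + 2,
        // and each subsequent node advances the location by a further 2, keeping
        // defs at different locations from the uses they might interfere with.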
        RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
        currentLoc += 2;
        JITDUMP("\n");

        if (firstColdLoc == MaxLocation)
        {
            if (block->isRunRarely())
            {
                firstColdLoc = currentLoc;
                JITDUMP("firstColdLoc = %d\n", firstColdLoc);
            }
        }
        else
        {
            // TODO: We'd like to assert the following but we don't currently ensure that only
            // "RunRarely" blocks are contiguous.
            // (The funclets will generally be last, but we don't follow layout order, so we
            // don't have to preserve that in the block sequence.)
            // assert(block->isRunRarely());
        }

        // For frame poisoning we generate code into scratch BB right after prolog since
        // otherwise the prolog might become too large. In this case we will put the poison immediate
        // into the scratch register, so it will be killed here.
        if (compiler->compShouldPoisonFrame() && compiler->fgFirstBBisScratch() && block == compiler->fgFirstBB)
        {
            regMaskTP killed;
#if defined(TARGET_XARCH)
            // Poisoning uses EAX for small vars and rep stosd that kills edi, ecx and eax for large vars.
            killed = RBM_EDI | RBM_ECX | RBM_EAX;
#else
            // Poisoning uses REG_SCRATCH for small vars and the memset helper for big vars.
            killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
#endif
            addRefsForPhysRegMask(killed, currentLoc + 1, RefTypeKill, true);
            currentLoc += 2;
        }

        LIR::Range& blockRange = LIR::AsRange(block);
        for (GenTree* node : blockRange)
        {
            // We increment the location of each tree node by 2 so that the node definition, if any,
            // is at a new location and doesn't interfere with the uses.
            // For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the
            // location by 2 for each destination register beyond the first.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            node->gtSeqNum = currentLoc;
            // In DEBUG, we want to set the gtRegTag to GT_REGTAG_REG, so that subsequent dumps will show the register
            // value.
            // Although this looks like a no-op it sets the tag.
            node->SetRegNum(node->GetRegNum());
#endif

            buildRefPositionsForNode(node, currentLoc);

#ifdef DEBUG
            if (currentLoc > maxNodeLocation)
            {
                maxNodeLocation = currentLoc;
            }
#endif // DEBUG
            currentLoc += 2;
        }

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
        // At the end of each block, create upperVectorRestores for any largeVectorVars that may be
        // partiallySpilled (during the build phase all intervals will be marked isPartiallySpilled if
        // they *may* be partially spilled at any point).
        if (enregisterLocalVars)
        {
            VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
            unsigned        largeVectorVarIndex = 0;
            while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
            {
                Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex);
                buildUpperVectorRestoreRefPosition(lclVarInterval, currentLoc, nullptr, false);
            }
        }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

        // Note: the visited set is cleared in LinearScan::doLinearScan()
        markBlockVisited(block);
        if (!defList.IsEmpty())
        {
            INDEBUG(dumpDefList());
            assert(!"Expected empty defList at end of block");
        }

        if (enregisterLocalVars)
        {
            // Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the
            // next block, or any unvisited successors.
            // This will address lclVars that are live on a backedge, as well as those that are kept
            // live at a GT_JMP.
            //
            // Blocks ending with "jmp method" are marked as BBJ_HAS_JMP,
            // and jmp call is represented using GT_JMP node which is a leaf node.
// Liveness phase keeps all the arguments of the method live till the end of // block by adding them to liveout set of the block containing GT_JMP. // // The target of a GT_JMP implicitly uses all the current method arguments, however // there are no actual references to them. This can cause LSRA to assert, because // the variables are live but it sees no references. In order to correctly model the // liveness of these arguments, we add dummy exposed uses, in the same manner as for // backward branches. This will happen automatically via expUseSet. // // Note that a block ending with GT_JMP has no successors and hence the variables // for which dummy use ref positions are added are arguments of the method. VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut)); VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars); BasicBlock* nextBlock = getNextBlock(); if (nextBlock != nullptr) { VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn); } for (BasicBlock* succ : block->GetAllSuccs(compiler)) { if (VarSetOps::IsEmpty(compiler, expUseSet)) { break; } if (isBlockVisited(succ)) { continue; } VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn); } if (!VarSetOps::IsEmpty(compiler, expUseSet)) { JITDUMP("Exposed uses:"); VarSetOps::Iter iter(compiler, expUseSet); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedToVarNum[varIndex]; const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); assert(isCandidateVar(varDsc)); Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType)); pos->setRegOptional(true); JITDUMP(" V%02u", varNum); } JITDUMP("\n"); } // Clear the "last use" flag on any vars that are live-out from this block. VARSET_TP bbLiveDefs(VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveOut)); VarSetOps::Iter iter(compiler, bbLiveDefs); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = compiler->lvaTrackedToVarNum[varIndex]; LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum); assert(isCandidateVar(varDsc)); RefPosition* const lastRP = getIntervalForLocalVar(varIndex)->lastRefPosition; // We should be able to assert that lastRP is non-null if it is live-out, but sometimes liveness // lies. if ((lastRP != nullptr) && (lastRP->bbNum == block->bbNum)) { lastRP->lastUse = false; } } #ifdef DEBUG checkLastUses(block); if (VERBOSE) { printf("use: "); dumpConvertedVarSet(compiler, block->bbVarUse); printf("\ndef: "); dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n"); } #endif // DEBUG } prevBlock = block; } if (enregisterLocalVars) { if (compiler->lvaKeepAliveAndReportThis()) { // If we need to KeepAliveAndReportThis, add a dummy exposed use of it at the end unsigned keepAliveVarNum = compiler->info.compThisArg; assert(compiler->info.compIsStatic == false); const LclVarDsc* varDsc = compiler->lvaGetDesc(keepAliveVarNum); if (isCandidateVar(varDsc)) { JITDUMP("Adding exposed use of this, for lvaKeepAliveAndReportThis\n"); Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType)); pos->setRegOptional(true); } } // Adjust heuristics for writeThru intervals. 
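        // As a worked example of the heuristic below (the weights are hypothetical):
        // an EH var whose first RefPosition is a RefTypeParamDef starts from an
        // initial weight of 2 * BB_UNITY_WEIGHT; if the weighted ref count left after
        // subtracting that is at or below 7 * BB_UNITY_WEIGHT, preferCalleeSave is
        // cleared and the var will not compete for callee-save registers.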
        if (compiler->compHndBBtabCount > 0)
        {
            VarSetOps::Iter iter(compiler, exceptVars);
            unsigned        varIndex = 0;
            while (iter.NextElem(&varIndex))
            {
                unsigned   varNum   = compiler->lvaTrackedToVarNum[varIndex];
                LclVarDsc* varDsc   = compiler->lvaGetDesc(varNum);
                Interval*  interval = getIntervalForLocalVar(varIndex);
                assert(interval->isWriteThru);
                weight_t weight = varDsc->lvRefCntWtd();

                // We'd like to only allocate registers for EH vars that have enough uses
                // to compensate for the additional registers being live (and for the possibility
                // that we may have to insert an additional copy).
                // However, we don't currently have that information available. Instead, we'll
                // aggressively assume that these vars are defined once, at their first RefPosition.
                //
                RefPosition* firstRefPosition = interval->firstRefPosition;

                // Incoming reg args are given an initial weight of 2 * BB_UNITY_WEIGHT
                // (see lvaComputeRefCounts(); this may be reviewed/changed in future).
                //
                weight_t initialWeight = (firstRefPosition->refType == RefTypeParamDef)
                                             ? (2 * BB_UNITY_WEIGHT)
                                             : blockInfo[firstRefPosition->bbNum].weight;
                weight -= initialWeight;

                // If the remaining weight is less than the initial weight, we'd like to allocate it only
                // opportunistically, but we don't currently have a mechanism to do so.
                // For now, we'll just avoid using callee-save registers if the weight is too low.
                if (interval->preferCalleeSave)
                {
                    // The benefit of a callee-save register isn't as high as it would be for a normal arg.
                    // We'll have at least the cost of saving & restoring the callee-save register,
                    // so we won't break even until we have at least 4 * BB_UNITY_WEIGHT.
                    // Given that we also don't have a good way to tell whether the variable is live
                    // across a call in the non-EH code, we'll be extra conservative about this.
                    // Note that for writeThru intervals we don't update the preferences to be only callee-save.
                    unsigned calleeSaveCount =
                        (varTypeUsesFloatReg(interval->registerType)) ? CNT_CALLEE_SAVED_FLOAT : CNT_CALLEE_ENREG;
                    if ((weight <= (BB_UNITY_WEIGHT * 7)) || varDsc->lvVarIndex >= calleeSaveCount)
                    {
                        // If this is relatively low weight, don't prefer callee-save at all.
                        interval->preferCalleeSave = false;
                    }
                    else
                    {
                        // In other cases, we'll add in the callee-save regs to the preferences, but not clear
                        // the non-callee-save regs. We also handle this case specially in tryAllocateFreeReg().
                        interval->registerPreferences |= calleeSaveRegs(interval->registerType);
                    }
                }
            }
        }

#ifdef DEBUG
        if (getLsraExtendLifeTimes())
        {
            for (unsigned lclNum = 0; lclNum < compiler->lvaCount; lclNum++)
            {
                LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
                if (varDsc->lvLRACandidate)
                {
                    JITDUMP("Adding exposed use of V%02u for LsraExtendLifetimes\n", lclNum);
                    Interval*    interval = getIntervalForLocalVar(varDsc->lvVarIndex);
                    RefPosition* pos =
                        newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
                    pos->setRegOptional(true);
                }
            }
        }
#endif // DEBUG
    }

    // If the last block has successors, create a RefTypeBB to record
    // what's live

    if (prevBlock->NumSucc(compiler) > 0)
    {
        RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
    }

#ifdef DEBUG
    // Make sure we don't have any blocks that were not visited
    for (BasicBlock* const block : compiler->Blocks())
    {
        assert(isBlockVisited(block));
    }

    if (VERBOSE)
    {
        lsraDumpIntervals("BEFORE VALIDATING INTERVALS");
        dumpRefPositions("BEFORE VALIDATING INTERVALS");
    }
    validateIntervals();

#endif // DEBUG
}

#ifdef DEBUG
//------------------------------------------------------------------------
// validateIntervals: A DEBUG-only method that checks that:
//      - the lclVar RefPositions do not reflect uses of undefined values
//      - a singleDef interval has only its first RefPosition as a RefTypeDef.
//
// TODO-Cleanup: If an undefined use is encountered, it merely prints a message
// but should probably assert.
//
void LinearScan::validateIntervals()
{
    if (enregisterLocalVars)
    {
        for (unsigned i = 0; i < compiler->lvaTrackedCount; i++)
        {
            if (!compiler->lvaGetDescByTrackedIndex(i)->lvLRACandidate)
            {
                continue;
            }
            Interval* interval = getIntervalForLocalVar(i);

            bool     defined      = false;
            unsigned lastUseBBNum = 0;
            JITDUMP("-----------------\n");
            for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
            {
                if (VERBOSE)
                {
                    ref->dump(this);
                }
                RefType refType = ref->refType;
                if (!defined && RefTypeIsUse(refType) && (lastUseBBNum == ref->bbNum))
                {
                    if (!ref->lastUse)
                    {
                        if (compiler->info.compMethodName != nullptr)
                        {
                            JITDUMP("%s: ", compiler->info.compMethodName);
                        }
                        JITDUMP("LocalVar V%02u: undefined use at %u\n", interval->varNum, ref->nodeLocation);
                        assert(false);
                    }
                }

                // For single-def intervals, only the first RefPosition should be a RefTypeDef.
                if (interval->isSingleDef && RefTypeIsDef(refType))
                {
                    assert(ref == interval->firstRefPosition);
                }

                // Note that there can be multiple last uses if they are on disjoint paths,
                // so we can't really check the lastUse flag
                if (ref->lastUse)
                {
                    defined      = false;
                    lastUseBBNum = ref->bbNum;
                }
                if (RefTypeIsDef(refType))
                {
                    defined = true;
                }
            }
        }
    }
}
#endif // DEBUG

#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// setTgtPref: Set a preference relationship between the given Interval
//             and a Use RefPosition.
//
// Arguments:
//    interval   - An interval whose defining instruction has tgtPrefUse as a use
//    tgtPrefUse - The use RefPosition
//
// Notes:
//    This is called when we would like tgtPrefUse and this def to get the same register.
//    This is only desirable if the use is a last use, which it is if it is a non-lclVar,
//    or if it is marked as a lastUse.
//    Note that we don't yet have valid lastUse information in the RefPositions that we're building
//    (every RefPosition is set as a lastUse until we encounter a new use), so we have to rely on the treeNode.
//    This may be called for multiple uses, in which case 'interval' will only get preferenced at most
//    to the first one (if it didn't already have a 'relatedInterval').
//
void setTgtPref(Interval* interval, RefPosition* tgtPrefUse)
{
    if (tgtPrefUse != nullptr)
    {
        Interval* useInterval = tgtPrefUse->getInterval();
        if (!useInterval->isLocalVar || (tgtPrefUse->treeNode == nullptr) ||
            ((tgtPrefUse->treeNode->gtFlags & GTF_VAR_DEATH) != 0))
        {
            // Set the use interval as related to the interval we're defining.
            useInterval->assignRelatedIntervalIfUnassigned(interval);
        }
    }
}
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS

//------------------------------------------------------------------------
// BuildDef: Build a RefTypeDef RefPosition for the given node
//
// Arguments:
//    tree          - The node that defines a register
//    dstCandidates - The candidate registers for the definition
//    multiRegIdx   - The index of the definition, defaults to zero.
//                    Only non-zero for multi-reg nodes.
//
// Return Value:
//    The newly created RefPosition.
//
// Notes:
//    Adds the RefInfo for the definition to the defList.
//
RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int multiRegIdx)
{
    assert(!tree->isContained());

    if (dstCandidates != RBM_NONE)
    {
        assert((tree->GetRegNum() == REG_NA) || (dstCandidates == genRegMask(tree->GetRegByIndex(multiRegIdx))));
    }

    RegisterType type;
    if (!tree->IsMultiRegNode())
    {
        type = getDefType(tree);
    }
    else
    {
        type = tree->GetRegTypeByIndex(multiRegIdx);
    }

    if (varTypeUsesFloatReg(type))
    {
        compiler->compFloatingPointUsed = true;
    }

    Interval* interval = newInterval(type);
    if (tree->GetRegNum() != REG_NA)
    {
        if (!tree->IsMultiRegNode() || (multiRegIdx == 0))
        {
            assert((dstCandidates == RBM_NONE) || (dstCandidates == genRegMask(tree->GetRegNum())));
            dstCandidates = genRegMask(tree->GetRegNum());
        }
        else
        {
            assert(isSingleRegister(dstCandidates));
        }
    }
#ifdef TARGET_X86
    else if (varTypeIsByte(tree))
    {
        if (dstCandidates == RBM_NONE)
        {
            dstCandidates = allRegs(TYP_INT);
        }
        dstCandidates &= ~RBM_NON_BYTE_REGS;
        assert(dstCandidates != RBM_NONE);
    }
#endif // TARGET_X86
    if (pendingDelayFree)
    {
        interval->hasInterferingUses = true;
        // pendingDelayFree = false;
    }
    RefPosition* defRefPosition =
        newRefPosition(interval, currentLoc + 1, RefTypeDef, tree, dstCandidates, multiRegIdx);
    if (tree->IsUnusedValue())
    {
        defRefPosition->isLocalDefUse = true;
        defRefPosition->lastUse       = true;
    }
    else
    {
        RefInfoListNode* refInfo = listNodePool.GetNode(defRefPosition, tree);
        defList.Append(refInfo);
    }

#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
    setTgtPref(interval, tgtPrefUse);
    setTgtPref(interval, tgtPrefUse2);
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    assert(!interval->isPartiallySpilled);
#endif

    return defRefPosition;
}

//------------------------------------------------------------------------
// BuildDefs: Build one or more RefTypeDef RefPositions for the given node
//
// Arguments:
//    tree          - The node that defines a register
//    dstCount      - The number of registers defined by the node
//    dstCandidates - the candidate registers for the definition
//
// Notes:
//    Adds the RefInfo for the definitions to the defList.
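//    Illustrative case (the register names assume x64 and are hypothetical for
//    any given call): a multi-reg call returning two INT fields has dstCount == 2
//    and dstCandidates == RBM_RAX | RBM_RDX, so each def below is pinned to the
//    i'th ABI return register via GetABIReturnReg(i).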
//
void LinearScan::BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates)
{
    bool fixedReg = false;
    if ((dstCount > 1) && (dstCandidates != RBM_NONE) && ((int)genCountBits(dstCandidates) == dstCount))
    {
        fixedReg = true;
    }
    const ReturnTypeDesc* retTypeDesc = nullptr;
    if (tree->IsMultiRegCall())
    {
        retTypeDesc = tree->AsCall()->GetReturnTypeDesc();
    }
    for (int i = 0; i < dstCount; i++)
    {
        regMaskTP thisDstCandidates;
        if (fixedReg)
        {
            // In case of multi-reg call node, we have to query the i'th position return register.
            // For all other cases of multi-reg definitions, the registers must be in sequential order.
            if (retTypeDesc != nullptr)
            {
                thisDstCandidates = genRegMask(tree->AsCall()->GetReturnTypeDesc()->GetABIReturnReg(i));
                assert((dstCandidates & thisDstCandidates) != RBM_NONE);
            }
            else
            {
                thisDstCandidates = genFindLowestBit(dstCandidates);
            }
            dstCandidates &= ~thisDstCandidates;
        }
        else
        {
            thisDstCandidates = dstCandidates;
        }
        BuildDef(tree, thisDstCandidates, i);
    }
}

//------------------------------------------------------------------------
// BuildDefsWithKills: Build one or more RefTypeDef RefPositions for the given node,
//                     as well as kills as specified by the given mask.
//
// Arguments:
//    tree          - The node that defines a register
//    dstCount      - The number of registers defined by the node
//    dstCandidates - The candidate registers for the definition
//    killMask      - The mask of registers killed by this node
//
// Notes:
//    Adds the RefInfo for the definitions to the defList.
//    The def and kill functionality is folded into a single method so that the
//    save and restores of upper vector registers can be bracketed around the def.
//
void LinearScan::BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask)
{
    assert(killMask == getKillSetForNode(tree));

    // Call this even when killMask is RBM_NONE, as we have to check for some special cases
    buildKillPositionsForNode(tree, currentLoc + 1, killMask);

    if (killMask != RBM_NONE)
    {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
        // Build RefPositions to account for the fact that, even in a callee-save register, the upper half of any large
        // vector will be killed by a call.
        // We actually need to find any calls that kill the upper-half of the callee-save vector registers.
        // But we will use as a proxy any node that kills floating point registers.
        // (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
        // We call this unconditionally for such nodes, as we will create RefPositions for any large vector tree temps
        // even if 'enregisterLocalVars' is false, or 'liveLargeVectors' is empty, though currently the allocation
        // phase will fully (rather than partially) spill those, so we don't need to build the UpperVectorRestore
        // RefPositions in that case.
        // This must be done after the kills, so that we know which large vectors are still live.
        //
        if ((killMask & RBM_FLT_CALLEE_TRASH) != RBM_NONE)
        {
            buildUpperVectorSaveRefPositions(tree, currentLoc + 1, killMask);
        }
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    }

    // Now, create the Def(s)
    BuildDefs(tree, dstCount, dstCandidates);
}

//------------------------------------------------------------------------
// BuildUse: Remove the RefInfoListNode for the given multi-reg index of the given node from
//           the defList, and build a use RefPosition for the associated Interval.
// // Arguments: // operand - The node of interest // candidates - The register candidates for the use // multiRegIdx - The index of the multireg def/use // // Return Value: // The newly created use RefPosition // // Notes: // The node must not be contained, and must have been processed by buildRefPositionsForNode(). // RefPosition* LinearScan::BuildUse(GenTree* operand, regMaskTP candidates, int multiRegIdx) { assert(!operand->isContained()); Interval* interval; bool regOptional = operand->IsRegOptional(); if (isCandidateLocalRef(operand)) { interval = getIntervalForLocalVarNode(operand->AsLclVarCommon()); // We have only approximate last-use information at this point. This is because the // execution order doesn't actually reflect the true order in which the localVars // are referenced - but the order of the RefPositions will, so we recompute it after // RefPositions are built. // Use the old value for setting currentLiveVars - note that we do this with the // not-quite-correct setting of lastUse. However, this is OK because // 1) this is only for preferencing, which doesn't require strict correctness, and // 2) the cases where these out-of-order uses occur should not overlap a kill. // TODO-Throughput: clean this up once we have the execution order correct. At that point // we can update currentLiveVars at the same place that we create the RefPosition. if ((operand->gtFlags & GTF_VAR_DEATH) != 0) { unsigned varIndex = interval->getVarIndex(compiler); VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex); } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE buildUpperVectorRestoreRefPosition(interval, currentLoc, operand, true); #endif } else if (operand->IsMultiRegLclVar()) { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* varDsc = compiler->lvaGetDesc(operand->AsLclVar()); LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + multiRegIdx); interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex); if (operand->AsLclVar()->IsLastUse(multiRegIdx)) { VarSetOps::RemoveElemD(compiler, currentLiveVars, fieldVarDsc->lvVarIndex); } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE buildUpperVectorRestoreRefPosition(interval, currentLoc, operand, true); #endif } else { RefInfoListNode* refInfo = defList.removeListNode(operand, multiRegIdx); RefPosition* defRefPos = refInfo->ref; assert(defRefPos->multiRegIdx == multiRegIdx); interval = defRefPos->getInterval(); listNodePool.ReturnNode(refInfo); operand = nullptr; } RefPosition* useRefPos = newRefPosition(interval, currentLoc, RefTypeUse, operand, candidates, multiRegIdx); useRefPos->setRegOptional(regOptional); return useRefPos; } //------------------------------------------------------------------------ // BuildIndirUses: Build Use RefPositions for an indirection that might be contained // // Arguments: // indirTree - The indirection node of interest // // Return Value: // The number of source registers used by the *parent* of this node. // // Notes: // This method may only be used if the candidates are the same for all sources. 
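//    For example (an illustrative address expression): for an indirection whose
//    contained address is LEA(base = t1, index = t2, scale = 4), uses are built
//    only for t1 and t2, so the parent sees a source count of 2.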
// int LinearScan::BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates) { GenTree* const addr = indirTree->gtOp1; return BuildAddrUses(addr, candidates); } int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates) { if (!addr->isContained()) { BuildUse(addr, candidates); return 1; } if (!addr->OperIs(GT_LEA)) { return 0; } GenTreeAddrMode* const addrMode = addr->AsAddrMode(); unsigned srcCount = 0; if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) { BuildUse(addrMode->Base(), candidates); srcCount++; } if (addrMode->Index() != nullptr) { if (!addrMode->Index()->isContained()) { BuildUse(addrMode->Index(), candidates); srcCount++; } #ifdef TARGET_ARM64 else if (addrMode->Index()->OperIs(GT_BFIZ)) { GenTreeCast* cast = addrMode->Index()->gtGetOp1()->AsCast(); assert(cast->isContained()); BuildUse(cast->CastOp(), candidates); srcCount++; } #endif } return srcCount; } //------------------------------------------------------------------------ // BuildOperandUses: Build Use RefPositions for an operand that might be contained. // // Arguments: // node - The node of interest // // Return Value: // The number of source registers used by the *parent* of this node. // int LinearScan::BuildOperandUses(GenTree* node, regMaskTP candidates) { if (!node->isContained()) { BuildUse(node, candidates); return 1; } #ifdef TARGET_ARM64 // Must happen before OperIsHWIntrinsic case, // but this occurs when a vector zero node is marked as contained. if (node->IsVectorZero()) { return 0; } #endif #if !defined(TARGET_64BIT) if (node->OperIs(GT_LONG)) { return BuildBinaryUses(node->AsOp(), candidates); } #endif // !defined(TARGET_64BIT) if (node->OperIsIndir()) { return BuildIndirUses(node->AsIndir(), candidates); } if (node->OperIs(GT_LEA)) { return BuildAddrUses(node, candidates); } #ifdef FEATURE_HW_INTRINSICS if (node->OperIsHWIntrinsic()) { if (node->AsHWIntrinsic()->OperIsMemoryLoad()) { return BuildAddrUses(node->AsHWIntrinsic()->Op(1)); } assert(node->AsHWIntrinsic()->GetOperandCount() == 1); BuildUse(node->AsHWIntrinsic()->Op(1), candidates); return 1; } #endif // FEATURE_HW_INTRINSICS #ifdef TARGET_ARM64 if (node->OperIs(GT_MUL)) { // Can be contained for MultiplyAdd on arm64 return BuildBinaryUses(node->AsOp(), candidates); } if (node->OperIs(GT_NEG, GT_CAST, GT_LSH)) { // GT_NEG can be contained for MultiplyAdd on arm64 // GT_CAST and GT_LSH for ADD with sign/zero extension return BuildOperandUses(node->gtGetOp1(), candidates); } #endif return 0; } //------------------------------------------------------------------------ // setDelayFree: Mark a RefPosition as delayRegFree, and set pendingDelayFree // // Arguments: // use - The use RefPosition to mark // void LinearScan::setDelayFree(RefPosition* use) { use->delayRegFree = true; pendingDelayFree = true; } //------------------------------------------------------------------------ // BuildDelayFreeUses: Build Use RefPositions for an operand that might be contained, // and which may need to be marked delayRegFree // // Arguments: // node - The node of interest // rmwNode - The node that has RMW semantics (if applicable) // candidates - The set of candidates for the uses // // Return Value: // The number of source registers used by the *parent* of this node. 
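//
// A sketch of the delayRegFree case (the operands are hypothetical): for an RMW
// node 'd = d op s' where 's' is neither the RMW local nor a last use, the use
// of 's' is marked delayRegFree so that it cannot share a register with the def
// of 'd'.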
// int LinearScan::BuildDelayFreeUses(GenTree* node, GenTree* rmwNode, regMaskTP candidates) { RefPosition* use = nullptr; Interval* rmwInterval = nullptr; bool rmwIsLastUse = false; GenTree* addr = nullptr; if ((rmwNode != nullptr) && isCandidateLocalRef(rmwNode)) { rmwInterval = getIntervalForLocalVarNode(rmwNode->AsLclVar()); // Note: we don't handle multi-reg vars here. It's not clear that there are any cases // where we'd encounter a multi-reg var in an RMW context. assert(!rmwNode->AsLclVar()->IsMultiReg()); rmwIsLastUse = rmwNode->AsLclVar()->IsLastUse(0); } if (!node->isContained()) { use = BuildUse(node, candidates); } #ifdef TARGET_ARM64 // Must happen before OperIsHWIntrinsic case, // but this occurs when a vector zero node is marked as contained. else if (node->IsVectorZero()) { return 0; } #endif #ifdef FEATURE_HW_INTRINSICS else if (node->OperIsHWIntrinsic()) { assert(node->AsHWIntrinsic()->GetOperandCount() == 1); use = BuildUse(node->AsHWIntrinsic()->Op(1), candidates); } #endif else if (!node->OperIsIndir()) { return 0; } else { GenTreeIndir* indirTree = node->AsIndir(); addr = indirTree->gtOp1; if (!addr->isContained()) { use = BuildUse(addr, candidates); } else if (!addr->OperIs(GT_LEA)) { return 0; } } if (use != nullptr) { // If node != rmwNode, then definitely node should be marked as "delayFree". // However, if node == rmwNode, then we can mark node as "delayFree" only if // none of the node/rmwNode are the last uses. If either of them are last use, // we can safely reuse the rmwNode as destination. if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } return 1; } // If we reach here we have a contained LEA in 'addr'. GenTreeAddrMode* const addrMode = addr->AsAddrMode(); unsigned srcCount = 0; if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) { use = BuildUse(addrMode->Base(), candidates); if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } srcCount++; } if ((addrMode->Index() != nullptr) && !addrMode->Index()->isContained()) { use = BuildUse(addrMode->Index(), candidates); if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse)) { setDelayFree(use); } srcCount++; } return srcCount; } //------------------------------------------------------------------------ // BuildBinaryUses: Get the RefInfoListNodes for the operands of the // given node, and build uses for them. // // Arguments: // node - a GenTreeOp // // Return Value: // The number of actual register operands. // // Notes: // The operands must already have been processed by buildRefPositionsForNode, and their // RefInfoListNodes placed in the defList. // int LinearScan::BuildBinaryUses(GenTreeOp* node, regMaskTP candidates) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2IfPresent(); #ifdef TARGET_XARCH if (node->OperIsBinary() && isRMWRegOper(node)) { assert(op2 != nullptr); return BuildRMWUses(node, op1, op2, candidates); } #endif // TARGET_XARCH int srcCount = 0; if (op1 != nullptr) { srcCount += BuildOperandUses(op1, candidates); } if (op2 != nullptr) { srcCount += BuildOperandUses(op2, candidates); } return srcCount; } //------------------------------------------------------------------------ // BuildStoreLocDef: Build a definition RefPosition for a local store // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This takes an index to enable building multiple defs for a multi-reg local. 
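//    Hedged example (the local numbers are made up): for STORE_LCL_VAR V02 := t5,
//    where t5 is a tree temp, t5's interval is preferenced to V02's interval so
//    that the def can often land directly in V02's register and avoid a copy.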
// void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index) { assert(varDsc->lvTracked); unsigned varIndex = varDsc->lvVarIndex; Interval* varDefInterval = getIntervalForLocalVar(varIndex); if (!storeLoc->IsLastUse(index)) { VarSetOps::AddElemD(compiler, currentLiveVars, varIndex); } if (singleUseRef != nullptr) { Interval* srcInterval = singleUseRef->getInterval(); if (srcInterval->relatedInterval == nullptr) { // Preference the source to the dest, unless this is a non-last-use localVar. // Note that the last-use info is not correct, but it is a better approximation than preferencing // the source to the dest, if the source's lifetime extends beyond the dest. if (!srcInterval->isLocalVar || (singleUseRef->treeNode->gtFlags & GTF_VAR_DEATH) != 0) { srcInterval->assignRelatedInterval(varDefInterval); } } else if (!srcInterval->isLocalVar) { // Preference the source to dest, if src is not a local var. srcInterval->assignRelatedInterval(varDefInterval); } } regMaskTP defCandidates = RBM_NONE; var_types type = varDsc->GetRegisterType(); #ifdef TARGET_X86 if (varTypeIsByte(type)) { defCandidates = allByteRegs(); } else { defCandidates = allRegs(type); } #else defCandidates = allRegs(type); #endif // TARGET_X86 RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index); if (varDefInterval->isWriteThru) { // We always make write-thru defs reg-optional, as we can store them if they don't // get a register. def->regOptional = true; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (Compiler::varTypeNeedsPartialCalleeSave(varDefInterval->registerType)) { varDefInterval->isPartiallySpilled = false; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE } //------------------------------------------------------------------------ // BuildMultiRegStoreLoc: Set register requirements for a store of a lclVar // // Arguments: // storeLoc - the multireg local store (GT_STORE_LCL_VAR) // // Returns: // The number of source registers read. // int LinearScan::BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc) { GenTree* op1 = storeLoc->gtGetOp1(); unsigned int dstCount = storeLoc->GetFieldCount(compiler); unsigned int srcCount = dstCount; LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc); assert(compiler->lvaEnregMultiRegVars); assert(storeLoc->OperGet() == GT_STORE_LCL_VAR); bool isMultiRegSrc = op1->IsMultiRegNode(); // The source must be: // - a multi-reg source // - an enregisterable SIMD type, or // - in-memory local // if (isMultiRegSrc) { assert(op1->GetMultiRegCount(compiler) == srcCount); } else if (varTypeIsEnregisterable(op1)) { // Create a delay free use, as we'll have to use it to create each field RefPosition* use = BuildUse(op1, RBM_NONE); setDelayFree(use); srcCount = 1; } else { // Otherwise we must have an in-memory struct lclVar. // We will just load directly into the register allocated for this lclVar, // so we don't need to build any uses. assert(op1->OperIs(GT_LCL_VAR) && op1->isContained() && op1->TypeIs(TYP_STRUCT)); srcCount = 0; } // For multi-reg local stores of multi-reg sources, the code generator will read each source // register, and then move it, if needed, to the destination register. These nodes have // 2*N locations where N is the number of registers, so that the liveness can // be reflected accordingly. 
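    // For instance (an illustrative two-field case): a store of a two-register
    // source occupies two location pairs; the field-0 use/def happens at the first
    // pair, and currentLoc advances by 2 before the field-1 use/def so that the
    // second source register stays live until it is actually read.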
// for (unsigned int i = 0; i < dstCount; ++i) { LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); RefPosition* singleUseRef = nullptr; if (isMultiRegSrc) { regMaskTP srcCandidates = RBM_NONE; #ifdef TARGET_X86 var_types type = fieldVarDsc->TypeGet(); if (varTypeIsByte(type)) { srcCandidates = allByteRegs(); } #endif // TARGET_X86 singleUseRef = BuildUse(op1, srcCandidates, i); } assert(isCandidateVar(fieldVarDsc)); BuildStoreLocDef(storeLoc, fieldVarDsc, singleUseRef, i); if (isMultiRegSrc && (i < (dstCount - 1))) { currentLoc += 2; } } return srcCount; } //------------------------------------------------------------------------ // BuildStoreLoc: Set register requirements for a store of a lclVar // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This involves: // - Setting the appropriate candidates. // - Handling of contained immediates. // - Requesting an internal register for SIMD12 stores. // int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) { GenTree* op1 = storeLoc->gtGetOp1(); int srcCount; RefPosition* singleUseRef = nullptr; LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc); if (storeLoc->IsMultiRegLclVar()) { return BuildMultiRegStoreLoc(storeLoc->AsLclVar()); } // First, define internal registers. #ifdef FEATURE_SIMD if (varTypeIsSIMD(storeLoc) && !op1->IsCnsIntOrI() && (storeLoc->TypeGet() == TYP_SIMD12)) { // Need an additional register to extract upper 4 bytes of Vector3, // it has to be float for x86. buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs()); } #endif // FEATURE_SIMD // Second, use source registers. if (op1->IsMultiRegNode() && (op1->GetMultiRegCount(compiler) > 1)) { // This is the case where the source produces multiple registers. // This must be a store lclvar. assert(storeLoc->OperGet() == GT_STORE_LCL_VAR); srcCount = op1->GetMultiRegCount(compiler); for (int i = 0; i < srcCount; ++i) { BuildUse(op1, RBM_NONE, i); } #if defined(FEATURE_SIMD) && defined(TARGET_X86) if (TargetOS::IsWindows && !compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { if (varTypeIsSIMD(storeLoc) && op1->IsCall()) { // Need an additional register to create a SIMD8 from EAX/EDX without SSE4.1. buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs()); if (isCandidateVar(varDsc)) { // This internal register must be different from the target register. setInternalRegsDelayFree = true; } } } #endif // FEATURE_SIMD && TARGET_X86 } else if (op1->isContained() && op1->OperIs(GT_BITCAST)) { GenTree* bitCastSrc = op1->gtGetOp1(); RegisterType registerType = bitCastSrc->TypeGet(); singleUseRef = BuildUse(bitCastSrc, allRegs(registerType)); Interval* srcInterval = singleUseRef->getInterval(); assert(srcInterval->registerType == registerType); srcCount = 1; } #ifndef TARGET_64BIT else if (varTypeIsLong(op1)) { // GT_MUL_LONG is handled by the IsMultiRegNode case above. assert(op1->OperIs(GT_LONG)); assert(op1->isContained() && !op1->gtGetOp1()->isContained() && !op1->gtGetOp2()->isContained()); srcCount = BuildBinaryUses(op1->AsOp()); assert(srcCount == 2); } #endif // !TARGET_64BIT else if (op1->isContained()) { #ifdef TARGET_XARCH if (varTypeIsSIMD(storeLoc)) { // This is the zero-init case, and we need a register to hold the zero. // (On Arm64 we can just store REG_ZR.) 
assert(op1->IsSIMDZero()); singleUseRef = BuildUse(op1->gtGetOp1()); srcCount = 1; } else #endif { srcCount = 0; } } else { srcCount = 1; regMaskTP srcCandidates = RBM_NONE; #ifdef TARGET_X86 var_types type = varDsc->GetRegisterType(storeLoc); if (varTypeIsByte(type)) { srcCandidates = allByteRegs(); } #endif // TARGET_X86 singleUseRef = BuildUse(op1, srcCandidates); } // Third, use internal registers. #ifdef TARGET_ARM if (storeLoc->OperIs(GT_STORE_LCL_FLD) && storeLoc->AsLclFld()->IsOffsetMisaligned()) { buildInternalIntRegisterDefForNode(storeLoc); // to generate address. buildInternalIntRegisterDefForNode(storeLoc); // to move float into an int reg. if (storeLoc->TypeIs(TYP_DOUBLE)) { buildInternalIntRegisterDefForNode(storeLoc); // to move the second half into an int reg. } } #endif // TARGET_ARM #if defined(FEATURE_SIMD) || defined(TARGET_ARM) buildInternalRegisterUses(); #endif // FEATURE_SIMD || TARGET_ARM // Fourth, define destination registers. // Add the lclVar to currentLiveVars (if it will remain live) if (isCandidateVar(varDsc)) { BuildStoreLocDef(storeLoc, varDsc, singleUseRef, 0); } return srcCount; } //------------------------------------------------------------------------ // BuildSimple: Builds use RefPositions for trees requiring no special handling // // Arguments: // tree - The node of interest // // Return Value: // The number of use RefPositions created // int LinearScan::BuildSimple(GenTree* tree) { unsigned kind = tree->OperKind(); int srcCount = 0; if ((kind & GTK_LEAF) == 0) { assert((kind & GTK_SMPOP) != 0); srcCount = BuildBinaryUses(tree->AsOp()); } if (tree->IsValue()) { BuildDef(tree); } return srcCount; } //------------------------------------------------------------------------ // BuildReturn: Set the NodeInfo for a GT_RETURN. // // Arguments: // tree - The node of interest // // Return Value: // The number of sources consumed by this node. 
// int LinearScan::BuildReturn(GenTree* tree) { GenTree* op1 = tree->gtGetOp1(); #if !defined(TARGET_64BIT) if (tree->TypeGet() == TYP_LONG) { assert((op1->OperGet() == GT_LONG) && op1->isContained()); GenTree* loVal = op1->gtGetOp1(); GenTree* hiVal = op1->gtGetOp2(); BuildUse(loVal, RBM_LNGRET_LO); BuildUse(hiVal, RBM_LNGRET_HI); return 2; } else #endif // !defined(TARGET_64BIT) if ((tree->TypeGet() != TYP_VOID) && !op1->isContained()) { regMaskTP useCandidates = RBM_NONE; #if FEATURE_MULTIREG_RET #ifdef TARGET_ARM64 if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) { useCandidates = allSIMDRegs(); if (op1->OperGet() == GT_LCL_VAR) { assert(op1->TypeGet() != TYP_SIMD32); useCandidates = RBM_DOUBLERET; } BuildUse(op1, useCandidates); return 1; } #endif // TARGET_ARM64 if (varTypeIsStruct(tree)) { // op1 has to be either a lclvar or a multi-reg returning call if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) { BuildUse(op1, useCandidates); } else { noway_assert(op1->IsMultiRegCall() || op1->IsMultiRegLclVar()); int srcCount; ReturnTypeDesc nonCallRetTypeDesc; const ReturnTypeDesc* pRetTypeDesc; if (op1->OperIs(GT_CALL)) { pRetTypeDesc = op1->AsCall()->GetReturnTypeDesc(); } else { assert(compiler->lvaEnregMultiRegVars); LclVarDsc* varDsc = compiler->lvaGetDesc(op1->AsLclVar()); nonCallRetTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(), compiler->info.compCallConv); pRetTypeDesc = &nonCallRetTypeDesc; assert(compiler->lvaGetDesc(op1->AsLclVar())->lvFieldCnt == nonCallRetTypeDesc.GetReturnRegCount()); } srcCount = pRetTypeDesc->GetReturnRegCount(); // For any source that's coming from a different register file, we need to ensure that // we reserve the specific ABI register we need. bool hasMismatchedRegTypes = false; if (op1->IsMultiRegLclVar()) { for (int i = 0; i < srcCount; i++) { RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); RegisterType dstType = regType(pRetTypeDesc->GetReturnRegType(i)); if (srcType != dstType) { hasMismatchedRegTypes = true; regMaskTP dstRegMask = genRegMask(pRetTypeDesc->GetABIReturnReg(i)); if (varTypeUsesFloatReg(dstType)) { buildInternalFloatRegisterDefForNode(tree, dstRegMask); } else { buildInternalIntRegisterDefForNode(tree, dstRegMask); } } } } for (int i = 0; i < srcCount; i++) { // We will build uses of the type of the operand registers/fields, and the codegen // for return will move as needed. if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == regType(pRetTypeDesc->GetReturnRegType(i)))) { BuildUse(op1, genRegMask(pRetTypeDesc->GetABIReturnReg(i)), i); } else { BuildUse(op1, RBM_NONE, i); } } if (hasMismatchedRegTypes) { buildInternalRegisterUses(); } return srcCount; } } else #endif // FEATURE_MULTIREG_RET { // Non-struct type return - determine useCandidates switch (tree->TypeGet()) { case TYP_VOID: useCandidates = RBM_NONE; break; case TYP_FLOAT: useCandidates = RBM_FLOATRET; break; case TYP_DOUBLE: // We ONLY want the valid double register in the RBM_DOUBLERET mask. useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); break; case TYP_LONG: useCandidates = RBM_LNGRET; break; default: useCandidates = RBM_INTRET; break; } BuildUse(op1, useCandidates); return 1; } } // No kills or defs. return 0; } //------------------------------------------------------------------------ // supportsSpecialPutArg: Determine if we can support specialPutArgs // // Return Value: // True iff specialPutArg intervals can be supported. // // Notes: // See below. 
// bool LinearScan::supportsSpecialPutArg() { #if defined(DEBUG) && defined(TARGET_X86) // On x86, `LSRA_LIMIT_CALLER` is too restrictive to allow the use of special put args: this stress mode // leaves only three registers allocatable--eax, ecx, and edx--of which the latter two are also used for the // first two integral arguments to a call. This can leave us with too few registers to succesfully allocate in // situations like the following: // // t1026 = lclVar ref V52 tmp35 u:3 REG NA <l:$3a1, c:$98d> // // /--* t1026 ref // t1352 = * putarg_reg ref REG NA // // t342 = lclVar int V14 loc6 u:4 REG NA $50c // // t343 = const int 1 REG NA $41 // // /--* t342 int // +--* t343 int // t344 = * + int REG NA $495 // // t345 = lclVar int V04 arg4 u:2 REG NA $100 // // /--* t344 int // +--* t345 int // t346 = * % int REG NA $496 // // /--* t346 int // t1353 = * putarg_reg int REG NA // // t1354 = lclVar ref V52 tmp35 (last use) REG NA // // /--* t1354 ref // t1355 = * lea(b+0) byref REG NA // // Here, the first `putarg_reg` would normally be considered a special put arg, which would remove `ecx` from the // set of allocatable registers, leaving only `eax` and `edx`. The allocator will then fail to allocate a register // for the def of `t345` if arg4 is not a register candidate: the corresponding ref position will be constrained to // { `ecx`, `ebx`, `esi`, `edi` }, which `LSRA_LIMIT_CALLER` will further constrain to `ecx`, which will not be // available due to the special put arg. return getStressLimitRegs() != LSRA_LIMIT_CALLER; #else return true; #endif } //------------------------------------------------------------------------ // BuildPutArgReg: Set the NodeInfo for a PUTARG_REG. // // Arguments: // node - The PUTARG_REG node. // argReg - The register in which to pass the argument. // info - The info for the node's using call. // isVarArgs - True if the call uses a varargs calling convention. // callHasFloatRegArgs - Set to true if this PUTARG_REG uses an FP register. // // Return Value: // None. // int LinearScan::BuildPutArgReg(GenTreeUnOp* node) { assert(node != nullptr); assert(node->OperIsPutArgReg()); regNumber argReg = node->GetRegNum(); assert(argReg != REG_NA); bool isSpecialPutArg = false; int srcCount = 1; GenTree* op1 = node->gtGetOp1(); // First, handle the GT_OBJ case, which loads into the arg register // (so we don't set the use to prefer that register for the source address). if (op1->OperIs(GT_OBJ)) { GenTreeObj* obj = op1->AsObj(); GenTree* addr = obj->Addr(); unsigned size = obj->GetLayout()->GetSize(); assert(size <= MAX_PASS_SINGLEREG_BYTES); if (addr->OperIsLocalAddr()) { // We don't need a source register. assert(addr->isContained()); srcCount = 0; } else if (!isPow2(size)) { // We'll need an internal register to do the odd-size load. // This can only happen with integer registers. assert(genIsValidIntReg(argReg)); buildInternalIntRegisterDefForNode(node); BuildUse(addr); buildInternalRegisterUses(); } return srcCount; } // To avoid redundant moves, have the argument operand computed in the // register in which the argument is passed to the call. regMaskTP argMask = genRegMask(argReg); RefPosition* use = BuildUse(op1, argMask); if (supportsSpecialPutArg() && isCandidateLocalRef(op1) && ((op1->gtFlags & GTF_VAR_DEATH) == 0)) { // This is the case for a "pass-through" copy of a lclVar. 
In the case where it is a non-last-use, // we don't want the def of the copy to kill the lclVar register, if it is assigned the same register // (which is actually what we hope will happen). JITDUMP("Setting putarg_reg as a pass-through of a non-last use lclVar\n"); // Preference the destination to the interval of the first register defined by the first operand. assert(use->getInterval()->isLocalVar); isSpecialPutArg = true; } #ifdef TARGET_ARM // If type of node is `long` then it is actually `double`. // The actual `long` types must have been transformed as a field list with two fields. if (node->TypeGet() == TYP_LONG) { srcCount++; regMaskTP argMaskHi = genRegMask(REG_NEXT(argReg)); assert(genRegArgNext(argReg) == REG_NEXT(argReg)); use = BuildUse(op1, argMaskHi, 1); BuildDef(node, argMask, 0); BuildDef(node, argMaskHi, 1); } else #endif // TARGET_ARM { RefPosition* def = BuildDef(node, argMask); if (isSpecialPutArg) { def->getInterval()->isSpecialPutArg = true; def->getInterval()->assignRelatedInterval(use->getInterval()); } } return srcCount; } //------------------------------------------------------------------------ // HandleFloatVarArgs: Handle additional register requirements for a varargs call // // Arguments: // call - The call node of interest // argNode - The current argument // // Return Value: // None. // // Notes: // In the case of a varargs call, the ABI dictates that if we have floating point args, // we must pass the enregistered arguments in both the integer and floating point registers. // Since the integer register is not associated with the arg node, we will reserve it as // an internal register on the call so that it is not used during the evaluation of the call node // (e.g. for the target). void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs) { if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode)) { *callHasFloatRegArgs = true; // We'll have to return the internal def and then later create a use for it. regNumber argReg = argNode->GetRegNum(); regNumber targetReg = compiler->getCallArgIntRegister(argReg); buildInternalIntRegisterDefForNode(call, genRegMask(targetReg)); } } //------------------------------------------------------------------------ // BuildGCWriteBarrier: Handle additional register requirements for a GC write barrier // // Arguments: // tree - The STORE_IND for which a write barrier is required // int LinearScan::BuildGCWriteBarrier(GenTree* tree) { GenTree* addr = tree->gtGetOp1(); GenTree* src = tree->gtGetOp2(); // In the case where we are doing a helper assignment, even if the dst // is an indir through an lea, we need to actually instantiate the // lea in a register assert(!addr->isContained() && !src->isContained()); regMaskTP addrCandidates = RBM_ARG_0; regMaskTP srcCandidates = RBM_ARG_1; #if defined(TARGET_ARM64) // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST) // the 'src' goes into x15 (REG_WRITE_BARRIER_SRC) // addrCandidates = RBM_WRITE_BARRIER_DST; srcCandidates = RBM_WRITE_BARRIER_SRC; #elif defined(TARGET_X86) && NOGC_WRITE_BARRIERS bool useOptimizedWriteBarrierHelper = compiler->codeGen->genUseOptimizedWriteBarriers(tree, src); if (useOptimizedWriteBarrierHelper) { // Special write barrier: // op1 (addr) goes into REG_WRITE_BARRIER (rdx) and // op2 (src) goes into any int register. 
addrCandidates = RBM_WRITE_BARRIER; srcCandidates = RBM_WRITE_BARRIER_SRC; } #endif // defined(TARGET_X86) && NOGC_WRITE_BARRIERS BuildUse(addr, addrCandidates); BuildUse(src, srcCandidates); regMaskTP killMask = getKillSetForStoreInd(tree->AsStoreInd()); buildKillPositionsForNode(tree, currentLoc + 1, killMask); return 2; } //------------------------------------------------------------------------ // BuildCmp: Set the register requirements for a compare. // // Arguments: // tree - The node of interest // // Return Value: // None. // int LinearScan::BuildCmp(GenTree* tree) { assert(tree->OperIsCompare() || tree->OperIs(GT_CMP) || tree->OperIs(GT_JCMP)); regMaskTP dstCandidates = RBM_NONE; regMaskTP op1Candidates = RBM_NONE; regMaskTP op2Candidates = RBM_NONE; GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); #ifdef TARGET_X86 // If the compare is used by a jump, we just need to set the condition codes. If not, then we need // to store the result into the low byte of a register, which requires the dst be a byteable register. if (tree->TypeGet() != TYP_VOID) { dstCandidates = allByteRegs(); } bool needByteRegs = false; if (varTypeIsByte(tree)) { if (!varTypeIsFloating(op1)) { needByteRegs = true; } } // Example1: GT_EQ(int, op1 of type ubyte, op2 of type ubyte) - in this case codegen uses // ubyte as the result of comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. Here is an example of generated code: // cmp dl, byte ptr[addr mode] // movzx edx, dl else if (varTypeIsByte(op1) && varTypeIsByte(op2)) { needByteRegs = true; } // Example2: GT_EQ(int, op1 of type ubyte, op2 is GT_CNS_INT) - in this case codegen uses // ubyte as the result of the comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. else if (varTypeIsByte(op1) && op2->IsCnsIntOrI()) { needByteRegs = true; } // Example3: GT_EQ(int, op1 is GT_CNS_INT, op2 of type ubyte) - in this case codegen uses // ubyte as the result of the comparison and if the result needs to be materialized into a reg // simply zero extend it to TYP_INT size. else if (op1->IsCnsIntOrI() && varTypeIsByte(op2)) { needByteRegs = true; } if (needByteRegs) { if (!op1->isContained()) { op1Candidates = allByteRegs(); } if (!op2->isContained()) { op2Candidates = allByteRegs(); } } #endif // TARGET_X86 int srcCount = BuildOperandUses(op1, op1Candidates); srcCount += BuildOperandUses(op2, op2Candidates); if (tree->TypeGet() != TYP_VOID) { BuildDef(tree, dstCandidates); } return srcCount; }
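To make the x86 byteable-register constraint in BuildCmp concrete, here is a small self-contained C++ sketch. It models candidate sets as plain bitmasks; the mask values and helper names are hypothetical stand-ins for the JIT's regMaskTP machinery, not its real encodings.

#include <cstdint>
#include <cstdio>

using RegMask = uint32_t;

// Hypothetical mask values; the real JIT defines these per target.
constexpr RegMask RBM_EAX  = 1u << 0;
constexpr RegMask RBM_ECX  = 1u << 1;
constexpr RegMask RBM_EDX  = 1u << 2;
constexpr RegMask RBM_EBX  = 1u << 3;
constexpr RegMask RBM_ESI  = 1u << 4;
constexpr RegMask RBM_EDI  = 1u << 5;
constexpr RegMask RBM_NONE = 0;

// On x86 only eax/ecx/edx/ebx have addressable low-byte halves.
constexpr RegMask allByteRegs() { return RBM_EAX | RBM_ECX | RBM_EDX | RBM_EBX; }
constexpr RegMask allRegs()     { return allByteRegs() | RBM_ESI | RBM_EDI; }

// Mirrors the shape of the decision in BuildCmp: a byte-sized operand that
// needs a register is restricted to byteable registers; contained operands
// consume no register at all.
RegMask operandCandidates(bool isByte, bool isContained)
{
    if (isContained)
    {
        return RBM_NONE;
    }
    return isByte ? allByteRegs() : allRegs();
}

int main()
{
    printf("byte operand:      0x%02x\n", operandCandidates(true, false));  // 0x0f
    printf("int operand:       0x%02x\n", operandCandidates(false, false)); // 0x3f
    printf("contained operand: 0x%02x\n", operandCandidates(false, true));  // 0x00
    return 0;
}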
1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/scripts/superpmi_replay.py
#!/usr/bin/env python3 # # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. # # Title : superpmi_replay.py # # Notes: # # Script to run "superpmi replay" for various collections under various COMPlus_JitStressRegs values. # ################################################################################ ################################################################################ import argparse import os from coreclr_arguments import * from jitutil import run_command parser = argparse.ArgumentParser(description="description") parser.add_argument("-arch", help="Architecture") parser.add_argument("-platform", help="OS platform") parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries") parser.add_argument("-log_directory", help="path to the directory containing superpmi log files") parser.add_argument("-partition", help="Partition number specifying which set of flags to use: between 1 and the `-partition_count` value") parser.add_argument("-partition_count", help="Count of the total number of partitions we are using: should be <= 9 (number of jit_flags_all elements)") jit_flags_all = [ "JitStressRegs=0", # JitStressRegs=1 disabled due to https://github.com/dotnet/runtime/issues/65332 # "JitStressRegs=1", "JitStressRegs=2", "JitStressRegs=3", "JitStressRegs=4", # JitStressRegs=8 disabled due to https://github.com/dotnet/runtime/issues/65332 # "JitStressRegs=8", "JitStressRegs=0x10", "JitStressRegs=0x80", "JitStressRegs=0x1000", ] def split(a, n): """ Splits array `a` in `n` partitions. Slightly modified from https://stackoverflow.com/a/2135920. Args: args (ArgParse): args parsed by arg parser Returns: args (CoreclrArguments) """ k, m = divmod(len(a), n) return [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)] def setup_args(args): """ Setup the args for SuperPMI to use. 
Args: args (ArgParse): args parsed by arg parser Returns: args (CoreclrArguments) """ coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False, require_built_test_dir=False, default_build_type="Checked") coreclr_args.verify(args, "arch", lambda unused: True, "Unable to set arch") coreclr_args.verify(args, "platform", lambda unused: True, "Unable to set platform") coreclr_args.verify(args, "jit_directory", lambda jit_directory: os.path.isdir(jit_directory), "jit_directory doesn't exist") coreclr_args.verify(args, "log_directory", lambda log_directory: os.path.isdir(log_directory), "log_directory doesn't exist") coreclr_args.verify(args, "partition", lambda partition: True, "Unable to set partition") coreclr_args.verify(args, "partition_count", lambda partition: True, "Unable to set partition_count") try: coreclr_args.partition = int(coreclr_args.partition) except ValueError as e: print("Illegal `-partition` value: " + str(coreclr_args.partition)) sys.exit(1) try: coreclr_args.partition_count = int(coreclr_args.partition_count) except ValueError as e: print("Illegal `-partition_count` value: " + str(coreclr_args.partition_count)) sys.exit(1) if coreclr_args.partition_count <= 0: print("Illegal `-partition_count` value: " + str(coreclr_args.partition_count)) sys.exit(1) if coreclr_args.partition < 1 or coreclr_args.partition > coreclr_args.partition_count: print("Illegal `-partition` value: " + str(coreclr_args.partition)) sys.exit(1) return coreclr_args def main(main_args): """Main entrypoint Args: main_args ([type]): Arguments to the script """ python_path = sys.executable cwd = os.path.dirname(os.path.realpath(__file__)) coreclr_args = setup_args(main_args) spmi_location = os.path.join(cwd, "artifacts", "spmi") log_directory = coreclr_args.log_directory platform_name = coreclr_args.platform os_name = "win" if platform_name.lower() == "windows" else "unix" arch_name = coreclr_args.arch host_arch_name = "x64" if arch_name.endswith("64") else "x86" os_name = "universal" if arch_name.startswith("arm") else os_name jit_path = os.path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name)) jit_flags_partitioned = split(jit_flags_all, coreclr_args.partition_count) jit_flags = jit_flags_partitioned[coreclr_args.partition - 1] # partition number is 1-based print("Running superpmi.py download") run_command([python_path, os.path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name, "-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location, "-log_level", "debug"], _exit_on_fail=True) failed_runs = [] for jit_flag in jit_flags: log_file = os.path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_"))) print("Running superpmi.py replay for {}".format(jit_flag)) _, _, return_code = run_command([ python_path, os.path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd, "-jitoption", jit_flag, "-jitoption", "TieredCompilation=0", "-target_os", platform_name, "-target_arch", arch_name, "-arch", host_arch_name, "-jit_path", jit_path, "-spmi_location", spmi_location, "-log_level", "debug", "-log_file", log_file]) if return_code != 0: failed_runs.append("Failure in {}".format(log_file)) # Consolidate all superpmi_*.logs in superpmi_platform_architecture.log final_log_name = os.path.join(log_directory, "superpmi_{}_{}_{}.log".format(platform_name, arch_name, coreclr_args.partition)) print("Consolidating final {}".format(final_log_name)) with 
open(final_log_name, "a") as final_superpmi_log: for superpmi_log in os.listdir(log_directory): if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"): continue print("Appending {}".format(superpmi_log)) final_superpmi_log.write("======================================================={}".format(os.linesep)) final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep)) final_superpmi_log.write("======================================================={}".format(os.linesep)) with open(os.path.join(log_directory, superpmi_log), "r") as current_superpmi_log: contents = current_superpmi_log.read() final_superpmi_log.write(contents) # Log failures summary if len(failed_runs) > 0: final_superpmi_log.write(os.linesep) final_superpmi_log.write(os.linesep) final_superpmi_log.write("========Failed runs summary========".format(os.linesep)) final_superpmi_log.write(os.linesep.join(failed_runs)) return 0 if len(failed_runs) == 0 else 1 if __name__ == "__main__": args = parser.parse_args() sys.exit(main(args))
#!/usr/bin/env python3 # # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. # # Title : superpmi_replay.py # # Notes: # # Script to run "superpmi replay" for various collections under various COMPlus_JitStressRegs values. # ################################################################################ ################################################################################ import argparse import os from coreclr_arguments import * from jitutil import run_command parser = argparse.ArgumentParser(description="description") parser.add_argument("-arch", help="Architecture") parser.add_argument("-platform", help="OS platform") parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries") parser.add_argument("-log_directory", help="path to the directory containing superpmi log files") parser.add_argument("-partition", help="Partition number specifying which set of flags to use: between 1 and the `-partition_count` value") parser.add_argument("-partition_count", help="Count of the total number of partitions we are using: should be <= 9 (number of jit_flags_all elements)") jit_flags_all = [ "JitStressRegs=0", "JitStressRegs=1", "JitStressRegs=2", "JitStressRegs=3", "JitStressRegs=4", "JitStressRegs=8", "JitStressRegs=0x10", "JitStressRegs=0x80", "JitStressRegs=0x1000", ] def split(a, n): """ Splits array `a` in `n` partitions. Slightly modified from https://stackoverflow.com/a/2135920. Args: args (ArgParse): args parsed by arg parser Returns: args (CoreclrArguments) """ k, m = divmod(len(a), n) return [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)] def setup_args(args): """ Setup the args for SuperPMI to use. Args: args (ArgParse): args parsed by arg parser Returns: args (CoreclrArguments) """ coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False, require_built_test_dir=False, default_build_type="Checked") coreclr_args.verify(args, "arch", lambda unused: True, "Unable to set arch") coreclr_args.verify(args, "platform", lambda unused: True, "Unable to set platform") coreclr_args.verify(args, "jit_directory", lambda jit_directory: os.path.isdir(jit_directory), "jit_directory doesn't exist") coreclr_args.verify(args, "log_directory", lambda log_directory: os.path.isdir(log_directory), "log_directory doesn't exist") coreclr_args.verify(args, "partition", lambda partition: True, "Unable to set partition") coreclr_args.verify(args, "partition_count", lambda partition: True, "Unable to set partition_count") try: coreclr_args.partition = int(coreclr_args.partition) except ValueError as e: print("Illegal `-partition` value: " + str(coreclr_args.partition)) sys.exit(1) try: coreclr_args.partition_count = int(coreclr_args.partition_count) except ValueError as e: print("Illegal `-partition_count` value: " + str(coreclr_args.partition_count)) sys.exit(1) if coreclr_args.partition_count <= 0: print("Illegal `-partition_count` value: " + str(coreclr_args.partition_count)) sys.exit(1) if coreclr_args.partition < 1 or coreclr_args.partition > coreclr_args.partition_count: print("Illegal `-partition` value: " + str(coreclr_args.partition)) sys.exit(1) return coreclr_args def main(main_args): """Main entrypoint Args: main_args ([type]): Arguments to the script """ python_path = sys.executable cwd = os.path.dirname(os.path.realpath(__file__)) coreclr_args = setup_args(main_args) spmi_location = os.path.join(cwd, "artifacts", "spmi") 
log_directory = coreclr_args.log_directory platform_name = coreclr_args.platform os_name = "win" if platform_name.lower() == "windows" else "unix" arch_name = coreclr_args.arch host_arch_name = "x64" if arch_name.endswith("64") else "x86" os_name = "universal" if arch_name.startswith("arm") else os_name jit_path = os.path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name)) jit_flags_partitioned = split(jit_flags_all, coreclr_args.partition_count) jit_flags = jit_flags_partitioned[coreclr_args.partition - 1] # partition number is 1-based print("Running superpmi.py download") run_command([python_path, os.path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name, "-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location, "-log_level", "debug"], _exit_on_fail=True) failed_runs = [] for jit_flag in jit_flags: log_file = os.path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_"))) print("Running superpmi.py replay for {}".format(jit_flag)) _, _, return_code = run_command([ python_path, os.path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd, "-jitoption", jit_flag, "-jitoption", "TieredCompilation=0", "-target_os", platform_name, "-target_arch", arch_name, "-arch", host_arch_name, "-jit_path", jit_path, "-spmi_location", spmi_location, "-log_level", "debug", "-log_file", log_file]) if return_code != 0: failed_runs.append("Failure in {}".format(log_file)) # Consolidate all superpmi_*.logs in superpmi_platform_architecture.log final_log_name = os.path.join(log_directory, "superpmi_{}_{}_{}.log".format(platform_name, arch_name, coreclr_args.partition)) print("Consolidating final {}".format(final_log_name)) with open(final_log_name, "a") as final_superpmi_log: for superpmi_log in os.listdir(log_directory): if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"): continue print("Appending {}".format(superpmi_log)) final_superpmi_log.write("======================================================={}".format(os.linesep)) final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep)) final_superpmi_log.write("======================================================={}".format(os.linesep)) with open(os.path.join(log_directory, superpmi_log), "r") as current_superpmi_log: contents = current_superpmi_log.read() final_superpmi_log.write(contents) # Log failures summary if len(failed_runs) > 0: final_superpmi_log.write(os.linesep) final_superpmi_log.write(os.linesep) final_superpmi_log.write("========Failed runs summary========".format(os.linesep)) final_superpmi_log.write(os.linesep.join(failed_runs)) return 0 if len(failed_runs) == 0 else 1 if __name__ == "__main__": args = parser.parse_args() sys.exit(main(args))
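For a concrete view of how the split() helper above distributes work, the following self-contained C++ sketch reproduces the same divmod-based partitioning and prints how the nine JitStressRegs flags would be spread across partitions; the four-partition count is an illustrative choice, not something the script fixes.

#include <cstdio>
#include <string>
#include <vector>

// Same indexing as the Python helper: k, m = divmod(len(a), n), and the
// first m partitions each receive one extra element.
std::vector<std::vector<std::string>> split(const std::vector<std::string>& a, int n)
{
    int k = static_cast<int>(a.size()) / n; // base slice length
    int m = static_cast<int>(a.size()) % n; // first m slices get one extra item
    std::vector<std::vector<std::string>> parts;
    for (int i = 0; i < n; ++i)
    {
        int begin = i * k + (i < m ? i : m);
        int end   = (i + 1) * k + (i + 1 < m ? i + 1 : m);
        parts.emplace_back(a.begin() + begin, a.begin() + end);
    }
    return parts;
}

int main()
{
    std::vector<std::string> flags = {
        "JitStressRegs=0",    "JitStressRegs=1",    "JitStressRegs=2",
        "JitStressRegs=3",    "JitStressRegs=4",    "JitStressRegs=8",
        "JitStressRegs=0x10", "JitStressRegs=0x80", "JitStressRegs=0x1000",
    };
    auto parts = split(flags, 4); // 9 flags over 4 partitions -> sizes 3,2,2,2
    for (size_t i = 0; i < parts.size(); ++i)
    {
        printf("partition %zu:", i + 1); // partition numbers are 1-based
        for (const auto& f : parts[i])
        {
            printf(" %s", f.c_str());
        }
        printf("\n");
    }
    return 0;
}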
1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/jit/targetarm64.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_ARM64) #include "target.h" const char* Target::g_tgtCPUName = "arm64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L; // clang-format off const regNumber intArgRegs [] = {REG_R0, REG_R1, REG_R2, REG_R3, REG_R4, REG_R5, REG_R6, REG_R7}; const regMaskTP intArgMasks[] = {RBM_R0, RBM_R1, RBM_R2, RBM_R3, RBM_R4, RBM_R5, RBM_R6, RBM_R7}; const regNumber fltArgRegs [] = {REG_V0, REG_V1, REG_V2, REG_V3, REG_V4, REG_V5, REG_V6, REG_V7 }; const regMaskTP fltArgMasks[] = {RBM_V0, RBM_V1, RBM_V2, RBM_V3, RBM_V4, RBM_V5, RBM_V6, RBM_V7 }; // clang-format on #endif // TARGET_ARM64
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_ARM64) #include "target.h" const char* Target::g_tgtCPUName = "arm64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L; // clang-format off const regNumber intArgRegs [] = {REG_R0, REG_R1, REG_R2, REG_R3, REG_R4, REG_R5, REG_R6, REG_R7}; const regMaskTP intArgMasks[] = {RBM_R0, RBM_R1, RBM_R2, RBM_R3, RBM_R4, RBM_R5, RBM_R6, RBM_R7}; const regNumber fltArgRegs [] = {REG_V0, REG_V1, REG_V2, REG_V3, REG_V4, REG_V5, REG_V6, REG_V7 }; const regMaskTP fltArgMasks[] = {RBM_V0, RBM_V1, RBM_V2, RBM_V3, RBM_V4, RBM_V5, RBM_V6, RBM_V7 }; // clang-format on #endif // TARGET_ARM64
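As an illustrative, self-contained sketch of how tables like intArgRegs/fltArgRegs are consumed (string names stand in for the JIT's regNumber values; this is not the JIT's actual lookup code): the i-th enregistered argument of each class indexes the matching table, and arguments past the eighth register of a class are passed on the stack under the arm64 ABI.

#include <cstdio>

// Hypothetical stand-ins: the real tables hold regNumber/regMaskTP values.
const char* const kIntArgRegs[] = {"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"};
const char* const kFltArgRegs[] = {"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"};

const char* argRegForIndex(bool isFloat, int index)
{
    if (index < 0 || index >= 8)
    {
        return "<stack>"; // beyond eight registers of a class, args go to the stack
    }
    return isFloat ? kFltArgRegs[index] : kIntArgRegs[index];
}

int main()
{
    printf("3rd integer arg: %s\n", argRegForIndex(false, 2)); // x2
    printf("1st float arg:   %s\n", argRegForIndex(true, 0));  // v0
    printf("9th integer arg: %s\n", argRegForIndex(false, 8)); // <stack>
    return 0;
}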
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/debug/inc/dacdbiinterface.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // DacDbiInterface.h // // // Define the interface between the DAC and DBI. //***************************************************************************** #ifndef _DACDBI_INTERFACE_H_ #define _DACDBI_INTERFACE_H_ #include <metahost.h> // The DAC/DBI interface can use structures and LSPTR declarations from the // existing V2 interfaces #include "dbgipcevents.h" //----------------------------------------------------------------------------- // Deallocation function for memory allocated with the global IAllocator object. // // Arguments: // p - pointer to delete. Allocated with IAllocator::Alloc // // Notes: // This should invoke the dtor and then call IAllocator::Free. // In the DAC implementation, this will call via IAllocator. // In the DBI implementation, this can directly call delete (assuming the IAllocator::Free // directly called new). template<class T> void DeleteDbiMemory(T *p); template<class T> void DeleteDbiArrayMemory(T *p, int count); // Need a class to serve as a tag that we can use to overload New/Delete. class forDbiWorker {}; extern forDbiWorker forDbi; extern void * operator new(size_t lenBytes, const forDbiWorker &); extern void * operator new[](size_t lenBytes, const forDbiWorker &); extern void operator delete(void *p, const forDbiWorker &); extern void operator delete[](void *p, const forDbiWorker &); // The dac exposes a way to walk all GC references in the process. This // includes both strong references and weak references. This is done // through a referece walk. typedef void* * RefWalkHandle; #include "dacdbistructures.h" // This is the current format of code:DbiVersion. It needs to be rev'ed when we decide to store something // else other than the product version of the DBI in DbiVersion (e.g. a timestamp). See // code:CordbProcess::CordbProcess#DBIVersionChecking for more information. const DWORD kCurrentDbiVersionFormat = 1; //----------------------------------------------------------------------------- // This is a low-level interface between DAC and DBI. // The DAC is the raw DAC-ized code from the EE. // DBI is the implementation of ICorDebug on top of that. // // This interface should be: // - Stateless: The DAC component should not have any persistent state. It should not have any resources // that it needs to clean up. DBI can store all the state (eg, list of of modules). // Using IAllocator/IStringHolder interfaces to allocate data to pass back out is ok because DBI owns // the resources, not the DAC layer. // - blittable: The types on the interface should be blittable. For example, use TIDs instead of OS Thread handles. // Passing pointers to be used as out-parameters is ok. // - lightweight: it will inevitably have many methods on it and should be very fluid to use. // - very descriptive: heavily call out liabilities on the runtime. For example, don't just have a method like // "GetName" where Name is ambiguous. Heavily comment exactly what Name is, when it may fail, if it's 0-length, // if it's unique, etc. This serves two purposes: // a) it helps ensure the right invariants flow up to the public API level. // b) it helps ensure that the debugger is making the right assumptions about the runtime's behavior. 
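// As a minimal sketch of the allocation contract described above (assumed
// shapes, not the actual DBI implementation): objects are constructed with
// placement-new in storage obtained from an IAllocator, and deletion runs
// the destructor before handing the raw memory back via IAllocator::Free.
// The explicit allocator parameter and the *Example names are illustrative;
// the real DeleteDbiMemory operates against the global IAllocator object.
#include <cstddef>
#include <new>

struct IAllocatorExample
{
    virtual void* Alloc(size_t cb) = 0;
    virtual void Free(void* p) = 0;
    virtual ~IAllocatorExample() = default;
};

template <class T>
T* NewDbiMemoryExample(IAllocatorExample* pAlloc)
{
    return new (pAlloc->Alloc(sizeof(T))) T(); // construct in allocator-owned storage
}

template <class T>
void DeleteDbiMemoryExample(IAllocatorExample* pAlloc, T* p)
{
    if (p == nullptr)
        return;
    p->~T();         // invoke the dtor first...
    pAlloc->Free(p); // ...then release the memory via the allocator
}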
// // #Marshaling: // This interface should be marshalable such that the caller (the Right Side) can exist in one // process, while the implementation of Dac could be on another machine. // - All types need to be marshable. // - Use OUT and OPTIONAL as defined in windef.h to guide the marshaler. Here are how types are marshaled: // T : value-type, copied on input. // T* : will be marshaled as non-null by-ref (copy on input, copy on return), // const T*: non-null, copy on input only. // OUT T*: non-null copy-on-return only. // OPTIONAL T*: by-ref, could be null. // - The marshaler has special knowledge of IStringHolder and DacDbiArrayList<T>. // - You can write custom marshalers for non-blittable structures defined in DacDbiStructures.h. // - There is custom handling for marshalling callbacks. // // // Threading: The interface (and the underlying DataTarget) are free-threaded to leverage // concurrency. // // Allocation: // This interface can use IAllocator to allocate objects and hand them back. The allocated objects should be: // - closed, serializable object graphs. // - should have private fields and public accessors // - have dtors that free any allocated the memory via calling DeleteDbiMemory. // Objects can be declared in a header and shared between both dbi and dac. // Consider using DacDbiArrayList<T> instead of custom allocations. // Error handling: // Any call on the interface may fail. For example, the data-target may not have access to the necessary memory. // Methods should throw on error. // // #Enumeration // General rules about Enumerations: // - Do not assume that enumerations exposed here are in any particular order. // - many enumerations also correspond to Load/Unload events. Since load/unload aren't atomic with publishing // in an enumeration, this is a Total Ordering of things: // a) object shows up in enumeration // b) load event. // c) ... steady state ... // d) object removed from DacDbi enumeration; // Any existing handles we get beyond this are explicitly associated with a Cordb* object; which can be // neutered on the unload event by Dbi. // e) unload event. // - Send after it's reachability from other objects is broken. (Eg, For AppDomain unload // means no threads left in that appdomain) // - Send before it's deleted (so VMPTR is still valid; not yet recycled). // - Send early enough that property access can at least gracefully fail. (eg, // Module::GetName should either return the name, or fail) // // Cordb must neuter any Cordb objects that have any pre-existing handles to the object. // After this point, gauranteed that nobody can discover the VMPTR any more: // - doesn't show up in enumerations (so can't be discoverered implicitly) // - object should not be discoverable by other objects in VM. // - any Cordb object that already had it would be neutered by Dbi. // - Therefore nothing should even be asking Dac for it. // f) object deleted. // Think of it like this: The event occurs to let you know that the enumeration has been updated. // // A robust debugger should not rely on events for correctness. For example, // a corrupt debuggee may send: // 1) multiple load events. (if target repeats due to an issue) // 2) no load event and only an unload event. (if target fails inbetween // publish (a) and load (b), and then backout code sends the unload). // 3) no unload event. 
(eg, if target is rudely killed) // 4) multiple unload events (if target repeats due to bug) // // This satisfies the following rules: // - once you get the load event, you can find the object via enumeration // - once an item is discoverable, it must immediately show up in the enumeration. // - once you get the unload event, the object is dead and can't be rediscovered via enumeration. // // This is an issue even for well-behaved targets. Imagine if a debugger attaches right after // an unload event is sent. We don't want the debugger to enumerate and re-discover the // unloaded object because now that the unload event is already sent, the debugger won't get // any further notification of when the object is deleted in the target. // Thus it's valuable for the debugger to have debug-only checks after unload events to assert // that the object is no longer discoverable. // //............................................................................. // The purpose of this object is to provide EE funcationality back to // the debugger. This represents the entire set of EE functions used // by the debugger. // // We will make this interface larger over time to grow the functionality // between the EE and the Debugger. // // //----------------------------------------------------------------------------- class IDacDbiInterface { public: class IStringHolder; // The following tag tells the DD-marshalling tool to start scanning. // BEGIN_MARSHAL //----------------------------------------------------------------------------- // Functions to control the behavior of the DacDbi implementation itself. //----------------------------------------------------------------------------- // // Check whether the version of the DBI matches the version of the runtime. // This is only called when we are remote debugging. On Windows, we should have checked all the // versions before we call any API on the IDacDbiInterface. See // code:CordbProcess::CordbProcess#DBIVersionChecking for more information on version checks. // // Return Value: // S_OK on success. // // Notes: // THIS MUST BE THE FIRST API ON THE INTERFACE! // virtual HRESULT CheckDbiVersion(const DbiVersion * pVersion) = 0; // // Flush the DAC cache. This should be called when target memory changes. // // // Return Value: // S_OK on success. // // Notes: // If this fails, the interface is in an undefined state. // This must be called anytime target memory changes, else all other functions // (besides Destroy) may yield out-of-date or semantically incorrect results. // virtual HRESULT FlushCache() = 0; // // Control DAC's checking of the target's consistency. Specifically, if this is disabled then // ASSERTs in VM code are ignored. The default is disabled, since DAC should do it's best to // return results even with a corrupt or unsyncrhonized target. See // code:ClrDataAccess::TargetConsistencyAssertsEnabled for more details. // // When testing with a non-corrupt and properly syncrhonized target, this should be enabled to // help catch bugs. // // Arguments: // fEnableAsserts - whether ASSERTs should be raised when consistency checks fail (_DEBUG // builds only) // // Notes: // In the future we may want to extend DAC target consistency checks to be retail checks // (exceptions) as well. We'll also need a mechanism for disabling them (eg. when an advanced // user wants to try to get a result anyway even though the target is inconsistent). 
In that // case we'll want an additional argument here for enabling/disabling the throwing of // consistency failures exceptions (this is independent from asserts - there are legitimate // scenarios for all 4 combinations). // virtual void DacSetTargetConsistencyChecks(bool fEnableAsserts) = 0; // // Destroy the interface object. The client should call this when it's done // with the IDacDbiInterface to free up any resources. // // Return Value: // None. // // Notes: // The client should not call anything else on this interface after Destroy. // virtual void Destroy() = 0; //----------------------------------------------------------------------------- // General purpose target inspection functions //----------------------------------------------------------------------------- // // Query if Left-side is started up? // // // Return Value: // BOOL whether Left-side is intialized. // // Notes: // If the Left-side is not yet started up, then data in the LS is not yet initialized enough // for us to make meaningful queries, but the runtime will fire "Startup Exception" when it is. // // If the left-side is started up, then data is ready. (Although data may be temporarily inconsistent, // see DataSafe). We may still get a Startup Exception in these cases, but it can be ignored. // virtual BOOL IsLeftSideInitialized() = 0; // // Get an LS Appdomain via an AppDomain unique ID. // Fails if the AD is not found or if the ID is invalid. // // Arguments: // appdomainId - "unique appdomain ID". Must be a valid Id. // // Return Value: // VMPTR_AppDomain for the corresponding AppDomain ID. Else throws. // // Notes: // This query is based off the lifespan of the AppDomain from the VM's perspective. // The AppDomainId is most likely obtained from an AppDomain-Created debug events. // An AppDomainId is unique for the lifetime of the VM. // This is the inverse function of GetAppDomainId(). // virtual VMPTR_AppDomain GetAppDomainFromId(ULONG appdomainId) = 0; // // Get the AppDomain ID for an AppDomain. // // Arguments: // vmAppDomain - VM pointer to the AppDomain object of interest // // Return Value: // AppDomain ID for appdomain. Else throws. // // Notes: // An AppDomainId is unique for the lifetime of the VM. It is non-zero. // virtual ULONG GetAppDomainId(VMPTR_AppDomain vmAppDomain) = 0; // // Get the managed AppDomain object for an AppDomain. // // Arguments: // vmAppDomain - VM pointer to the AppDomain object of interest // // Return Value: // objecthandle for the managed app domain object or the Null VMPTR if there is no // object created yet // // Notes: // The AppDomain managed object is lazily constructed on the AppDomain the first time // it is requested. It may be NULL. // virtual VMPTR_OBJECTHANDLE GetAppDomainObject(VMPTR_AppDomain vmAppDomain) = 0; virtual void GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Assembly * vmAssembly) = 0; // // Determines whether the runtime security system has assigned full-trust to this assembly. // // Arguments: // vmDomainAssembly - VM pointer to the assembly in question. // // Return Value: // Returns trust status for the assembly. // Throws on error. // // Notes: // Of course trusted malicious code in the process could always cause this API to lie. However, // an assembly loaded without full-trust should have no way of causing this API to return true. // virtual BOOL IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly) = 0; // // Get the full AD friendly name for the given EE AppDomain. 
// // Arguments: // vmAppDomain - VM pointer to the AppDomain. // pStrName - required out parameter where the name will be stored. // // Return Value: // None. On success, sets the string via the holder. Throws on error. // This either sets pStrName or Throws. It won't do both. // // Notes: // AD names have an unbounded length. AppDomain friendly names can also change, and // so callers should be prepared to listen for name-change events and requery. // AD names are specified by the user. // virtual void GetAppDomainFullName( VMPTR_AppDomain vmAppDomain, IStringHolder * pStrName) = 0; // // #ModuleNames // // Modules / Assemblies have many different naming schemes: // // 1) Metadata Scope name: All modules have metadata, and each metadata scope has a name assigned // by the creator of that scope (eg, the compiler). This usually is similar to the filename, but could // be arbitrary. // eg: "Foo" // // 2) FileRecord: the File record entry in the manifest module's metadata (table 0x26) for this module. // eg: "Foo" // // 3) Managed module path: This is path that the image was loaded from. Eg, "c:\foo.dll". For non-file // based modules (like in-memory, dynamic), there is no file path. The specific path is determined by // fusion / loader policy. // eg: "c:\foo.dll" // // 4) GAC path: If the module is loaded from the GAC, this is the path on disk into the gac cache that // the image was pulled from. // eg: " // // 5) Ngen path: If the module was ngenned, this is the path on disk into the ngen cache that the image // was pulled from. // eg: // // 6) Fully Qualified Assembly Name: this is an abstract name, which the CLR (fusion / loader) will // resolve (to a filename for file-based modules). Managed apps may need to deal in terms of FQN, // but the debugging services generally avoid them. // eg: "Foo, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089, processorArchitecture=MSIL". // // // Get the "simple name" of a module. This is a heuristic within the CLR to return a simple, // not-well-specified, but meaningful, name for a module. // // Arguments: // vmModule - module to query // pStrFileName - string holder to get simple name. // // Return Value: // None, but pStrFilename will be initialized upon return. // Throws if there was a problem reading the data with DAC or if there is an OOM exception, // in which case no string was stored into pStrFilename. // // Notes: // See code:#ModuleNames for an overview on module names. // // This is really just using code:Module::GetSimpleName. // This gives back a meaningful name, which is generally some combination of the metadata // name of the FileRecord name. This is important because it's valid even when a module // doesn't have a filename. // // The simple name does not have any meaning. It is not a filename, does not necessarily have any // relationship to the filename, and it's not necesarily the metadata name. // Do not use the simple name for anything other than as a pretty string to give the an end user. // virtual void GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0; // // Get the full path and file name to the assembly's manifest module. // // Arguments: // vmAssembly - VM pointer to the Assembly. // pStrFilename - required out parameter where the filename will be stored. // // Return Value: // TRUE on success, in which case the filename was stored into pStrFilename // FALSE if the assembly has no filename (eg. 
for in-memory assemblies), in which // case an empty string was stored into pStrFilename. // Throws if there was a problem reading the data with DAC, in which case // no string was stored into pStrFilename. // // Notes: // See code:#ModuleNames for an overview on module names. // // Normally this is just the filename from which the dll containing the assembly was // loaded. In the case of multi-module assemblies, this is the filename for the // manifest module (the one containing the assembly manifest). For in-memory // assemblies (eg. those loaded from a Byte[], and those created by Reflection.Emit // which will not be saved to disk) there is no filename. In that case this API // returns an empty string. // virtual BOOL GetAssemblyPath(VMPTR_Assembly vmAssembly, IStringHolder * pStrFilename) = 0; // get a type def resolved across modules // Arguments: // input: pTypeRefInfo - domain file and type ref from the referencing module // output: pTargetRefInfo - domain file and type def from the referenced type (this may // come from a module other than the referencing module) // Note: throws virtual void ResolveTypeReference(const TypeRefData * pTypeRefInfo, TypeRefData * pTargetRefInfo) = 0; // // Get the full path and file name to the module (if any). // // Arguments: // vmModule - VM pointer to the module. // pStrFilename - required out parameter where the filename will be stored. // // Return Value: // TRUE on success, in which case the filename was stored into pStrFilename // FALSE the module has no filename (eg. for in-memory assemblies), in which // case an empty string was stored into pStrFilename. // Throws an exception if there was a problem reading the data with DAC, in which case // no string was stored into pStrFilename. // // Notes: // See code:#ModuleNames for an overview on module names. // // Normally this is just the filename from which the module was loaded. // For in-memory module (eg. those loaded from a Byte[], and those created by Reflection.Emit // which will not be saved to disk) there is no filename. In that case this API // returns an empty string. Consider GetModuleSimpleName in those cases. // // We intentionally don't use the function name "GetModuleFileName" here because // winbase #defines that token (along with many others) to have an A or W suffix. // virtual BOOL GetModulePath(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0; // // Get the full path and file name to the ngen image for the module (if any). // // Arguments: // vmModule - VM pointer to the module. // pStrFilename - required out parameter where the filename will be stored. // // Return Value: // TRUE on success, in which case the filename was stored into pStrFilename // FALSE the module has no filename (eg. for in-memory assemblies), in which // case an empty string was stored into pStrFilename. // Throws an exception if there was a problem reading the data with DAC, in which case // no string was stored into pStrFilename. // // Notes: // See code:#ModuleNames for an overview on module names. // virtual BOOL GetModuleNGenPath(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0; // Get the metadata for the target module // // Arguments: // vmModule - target module to get metadata for. // pTargetBuffer - Out parameter to get target-buffer for metadata. Gauranteed to be non-empty on // return. This will throw CORDBG_E_MISSING_METADATA hr if the buffer is empty. // This does not gaurantee that the buffer is readable. For example, in a minidump, buffer's // memory may not be present. 
// // Notes: // Each module's metadata exists as a raw buffer in the target. This finds that target buffer and // returns it. The host can then use OpenScopeOnMemory to create an instance of the metadata in // the host process space. // // For dynamic modules, the CLR will eagerly serialize the metadata at "debuggable" points. This // could be after each type is loaded; or after a bulk update. // For non-dynamic modules (both in-memory and file-based), the metadata exists in the PEAssembly's image. // // Failure cases: // This should succeed in normal, live-debugging scenarios. However, common failure paths here would be: // // 1. Data structures are intact, but Unable to even find the TargetBuffer in the target. In this // case Metadata is truly missing. Likely means: // - target is in the middle of generating metadata for a large bulk operation. (For example, attach // to a TypeLibConverter using Ref.Emit to emit a module for a very large .tlb file). // - corrupted target, // - or the target had some error(out-of-memory?) generating the metadata. // This throws CORDBG_E_MISSING_METADATA. // // 2. Target buffer is found, but memory it describes is not present. Likely means a minidump // scenario with missing memory. Client should use alternative metadata location techniques (such as // an ImagePath to locate the original image and then pulling metadata from that file). // virtual void GetMetadata(VMPTR_Module vmModule, OUT TargetBuffer * pTargetBuffer) = 0; // Definitions for possible symbol formats // This is equivalent to code:ESymbolFormat in the runtime typedef enum { kSymbolFormatNone, // No symbols available kSymbolFormatPDB, // PDB symbol format - use diasymreader.dll } SymbolFormat; // // Get the in-memory symbol (PDB/ILDB) buffer in the target if present. // // Arguments: // vmModule- module to query for. // pTargetBuffer - out parameter to get buffer in target of symbols. If no symbols, pTargetBuffer is empty on return. // pSymbolFormat - out parameter to get the format of the symbols. // // Returns: // 1) If there are in-memory symbols for the given module, pTargetBuffer is set to the buffer describing // the symbols and pSymbolFormat is set to indicate PDB or ILDB format. This buffer can then be read, // converted into an IStream, and passed to ISymUnmanagedBinder::CreateReaderForStream. // 2) If the target is valid, but there is no symbols for the module, then pTargetBuffer->IsEmpty() == true // and *pSymbolFormat == kSymbolFormatNone. // 3) Else, throws exception. // // // Notes: // For file-based modules, PDBs are normally on disk and the debugger retreieves them via a symbol // path without any help from ICorDebug. // However, in some cases, the PDB is stored in-memory and so the debugger needs ICorDebug. Common // cases include: // - dynamic modules generated with reflection-emit. // - in-memory modules loaded by Load(Byte[],Byte[]), which provide the PDB as a byte[]. // - hosted modules where the host (such as SQL) store the PDB. // // In all cases, this can commonly fail. Executable code does not need to have a PDB. virtual void GetSymbolsBuffer(VMPTR_Module vmModule, OUT TargetBuffer * pTargetBuffer, OUT SymbolFormat * pSymbolFormat) = 0; // // Get properties for a module // // Arguments: // vmModule - vm handle to a module // pData - required out parameter which will be filled out with module properties // // Notes: // See definition of DomainAssemblyInfo for more details about what properties // this gives back. 
virtual void GetModuleData(VMPTR_Module vmModule, OUT ModuleInfo * pData) = 0; // // Get properties for a DomainAssembly // // Arguments: // vmDomainAssembly - vm handle to a DomainAssembly // pData - required out parameter which will be filled out with module properties // // Notes: // See definition of DomainAssemblyInfo for more details about what properties // this gives back. virtual void GetDomainAssemblyData(VMPTR_DomainAssembly vmDomainAssembly, OUT DomainAssemblyInfo * pData) = 0; virtual void GetModuleForDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Module * pModule) = 0; //......................................................................... // These methods were the methods that DBI was calling from IXClrData in V2. // We imported them over to this V3 interface so that we can sever all ties between DBI and the // old IXClrData. // // The exact semantics of these are whatever their V2 IXClrData counterpart did. // We may eventually migrate these to their real V3 replacements. //......................................................................... // "types" of addresses. This is taken exactly from the definition, but renamed to match // CLR coding conventions. typedef enum { kAddressUnrecognized, kAddressManagedMethod, kAddressRuntimeManagedCode, kAddressRuntimeUnmanagedCode, kAddressGcData, kAddressRuntimeManagedStub, kAddressRuntimeUnmanagedStub, } AddressType; // // Get the "type" of address. // // Arguments: // address - address to query type. // // Return Value: // Type of address. Throws on error. // // Notes: // This is taken exactly from the IXClrData definition. // This is provided for V3 compatibility to support Interop-debugging. // This should eventually be deprecated. // virtual AddressType GetAddressType(CORDB_ADDRESS address) = 0; // // Query if address is a CLR stub. // // Arguments: // address - Target address to query for. // // // Return Value: // true if the address is a CLR stub. // // Notes: // This is used to implement ICorDebugProcess::IsTransitionStub // This yields true if the address is claimed by a CLR stub manager, or if the IP is in mscorwks. // Conceptually, this should eventually be merged with GetAddressType(). // virtual BOOL IsTransitionStub(CORDB_ADDRESS address) = 0; //......................................................................... // Get the values of the JIT Optimization and EnC flags. // // Arguments: // vmDomainAssembly - (input) VM DomainAssembly (module) for which we are retrieving flags // pfAllowJITOpts - (mandatory output) true iff this is not compiled for debug, // i.e., without optimization // pfEnableEnc - (mandatory output) true iff this module has EnC enabled // // Return Value: // Returns on success. Throws on failure. // // Notes: // This is used to implement both ICorDebugModule2::GetJitCompilerFlags and // ICorDebugCode2::GetCompilerFlags. //......................................................................... virtual void GetCompilerFlags( VMPTR_DomainAssembly vmDomainAssembly, OUT BOOL * pfAllowJITOpts, OUT BOOL * pfEnableEnC) = 0; //......................................................................... // Set the values of the JIT optimization and EnC flags. // // Arguments: // vmDomainAssembly - (input) VM DomainAssembly (module) for which we are setting flags // fAllowJitOpts - (input) true iff this should not be compiled for debug, // i.e., without optimization // fEnableEnC - (input) true iff this module should have EnC enabled.
If this is // false, no change is made to the EnC flags. In other words, once EnC is enabled, // there is no way to disable it. // // Return Value: // S_OK on success and all bits were set. // CORDBG_S_NOT_ALL_BITS_SET - if not all bits are set. Must use GetCompilerFlags to // determine which bits were set. // CORDBG_E_CANT_CHANGE_JIT_SETTING_FOR_ZAP_MODULE - if module is ngenned. // Throws on other errors. // // Notes: // Caller can only use this at module-load before any methods are jitted. // This may be called multiple times. // This is used to implement both ICorDebugModule2::SetJitCompilerFlags and // ICorDebugModule::EnableJITDebugging. //......................................................................... virtual HRESULT SetCompilerFlags(VMPTR_DomainAssembly vmDomainAssembly, BOOL fAllowJitOpts, BOOL fEnableEnC) = 0; // // Enumerate all AppDomains in the process. // // Arguments: // fpCallback - callback to invoke on each appdomain // pUserData - user data to supply for each callback. // // Return Value: // Returns on success. Throws on error. // // Notes: // Enumerates all appdomains in the process, including the Default-domain. // Appdomains must show up in this list before the AD Load event is sent, and before // that appdomain is discoverable from the debugger. // See enumeration rules for details. // typedef void (*FP_APPDOMAIN_ENUMERATION_CALLBACK)(VMPTR_AppDomain vmAppDomain, CALLBACK_DATA pUserData); virtual void EnumerateAppDomains(FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0; // // Enumerate all Assemblies in an appdomain. Enumeration is in load order // // Arguments: // vmAppDomain - domain in which to enumerate // fpCallback - callback to invoke on each assembly // pUserData - user data to supply for each callback. // // Return Value: // Returns on success. Throws on error. // // Notes: // Enumerates all executable assemblies (both shared and unshared) within an appdomain. // This does not include inspection-only assemblies because those are just data and // not executable (eg, they'll never show up on the stack and you can't set a breakpoint in them). // This enumeration needs to be consistent with load/unload events. // See enumeration rules for details. // // The order of the enumeration is the order the assemblies were loaded. // Ultimately, the debugger needs to be able to tell the user the load // order of assemblies (it can do this with native dlls). Since // managed assemblies don't 1:1 correspond to native dlls, debuggers // need this information from the runtime. // typedef void (*FP_ASSEMBLY_ENUMERATION_CALLBACK)(VMPTR_DomainAssembly vmDomainAssembly, CALLBACK_DATA pUserData); virtual void EnumerateAssembliesInAppDomain(VMPTR_AppDomain vmAppDomain, FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0; // // Callback function for EnumerateModulesInAssembly // // This can throw on error. // // Arguments: // vmModule - new module from the enumeration // pUserData - user data passed to EnumerateModulesInAssembly typedef void (*FP_MODULE_ENUMERATION_CALLBACK)(VMPTR_DomainAssembly vmModule, CALLBACK_DATA pUserData); // // Enumerates all the code Modules in an assembly. // // Arguments: // vmAssembly - assembly to enumerate within // fpCallback - callback function to invoke on each module // pUserData - arbitrary data passed to the callback // // Notes: // This only enumerates "code" modules (ie, modules that have executable code in them).
That // includes normal file-based, ngenned, in-memory, and even dynamic modules. // That excludes: // - Resource modules (which have no code or metadata) // - Inspection-only modules. These are viewed as pure data from the debugger's perspective. // virtual void EnumerateModulesInAssembly( VMPTR_DomainAssembly vmAssembly, FP_MODULE_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0; // // When stopped at an event, request a synchronization. // // // Return Value: // Returns on success. Throws on error. // // Notes: // Call this when an event is dispatched (eg, LoadModule) to request the runtime // synchronize. This does a cooperative sync with the LS. This is not an async break // and can not be called at arbitrary points. // This primitive lets the LS always take the V3 codepath and defer decision making to the RS. // The V2 behavior is to call this after every event (since that's what V2 did). // The V3 behavior is to never call this. // // If this is called, the LS will sync and we will get a SyncComplete. // // This is also like a precursor to "AsyncBreakAllOtherThreads" // virtual void RequestSyncAtEvent() = 0; // Sets a flag inside LS.Debugger that indicates that // 1. all "first chance exception" events should not be sent to the debugger // 2. "exception handler found" events for exceptions never crossing JMC frames should not be sent to the debugger // // Arguments: // sendExceptionsOutsideOfJMC - new value for the flag Debugger::m_sendExceptionsOutsideOfJMC. // // Return Value: // Returns error code, never throws. // // Note: This call is used by ICorDebugProcess8.EnableExceptionCallbacksOutsideOfMyCode. virtual HRESULT SetSendExceptionsOutsideOfJMC(BOOL sendExceptionsOutsideOfJMC) = 0; // // Notify the debuggee that a debugger attach is pending. // // Arguments: // None // // Return Value: // Returns on success. Throws on error. // // Notes: // Attaching means that CORDebuggerPendingAttach() will now return true. // This doesn't do anything else (eg, no fake events). // // @dbgtodo- still an open Feature-Crew decision how this is exposed publicly. virtual void MarkDebuggerAttachPending() = 0; // // Notify the debuggee that a debugger is attached / detached. // // Arguments: // fAttached - true if we're attaching, false if we're detaching. // // Return Value: // Returns on success. Throws on error. // // Notes: // Attaching means that CorDebuggerAttached() will now return true. // This doesn't do anything else (eg, no fake events). // This lets the V3 codepaths invade the LS to subscribe to events. // // @dbgtodo- still an open Feature-Crew decision how this is exposed publicly. virtual void MarkDebuggerAttached(BOOL fAttached) = 0; // // Hijack a thread. This will effectively do a native func-eval of the thread to set the IP // to a hijack stub and push the parameters. // // Arguments: // dwThreadId - OS thread to hijack. This must be consistent with pRecord and pOriginalContext // pRecord - optional pointer to Exception record. Required if this is hijacked at an exception. // NULL if this is hijacked at a managed IP. // pOriginalContext - optional pointer to buffer to receive the context that the thread is hijacked from. // The caller can use this to either restore the hijack or walk the hijack. // cbSizeContext - size in bytes of buffer pointed to by pContext // reason - reason code for the hijack. The hijack stub can then delegate to the proper hijack. // pUserData - arbitrary data passed through to hijack. This is reason-dependent.
// pRemoteContextAddr - If non-NULL this receives the remote address where the CONTEXT was written // in the debuggee. // // Assumptions: // Caller must guarantee this is safe. // This is intended to be used on a thread that either just had an exception or is at a managed IP. // If this is hijacked at an exception, client must cancel the exception (gh / DBG_CONTINUE) // so that the OS exception processing doesn't interfere with the hijack. // // Notes: // Hijack is hard, so we want 1 hijack stub that handles all our hijacking needs. // This lets us share: // - assembly stubs (which are very platform specific) // - hijacking / restoration mechanics, // - making the hijack walkable via the stackwalker. // // Hijacking can be used to implement: func-eval, FE abort, Synchronizing, // dispatching Unhandled Exception notifications. // // Nesting: Since Hijacking passes the key state off to the hijacked thread (such as original // context to be used with restoring the hijack), the raw hijacking nests just like function // calls. However, the client may need to keep additional state to handle nesting. For example, // nested hijacks will require the client to track multiple CONTEXT*. // // If the thread is in jitted code, then the hijack needs to cooperate with the in-process // stackwalker that the GC uses. It must be in cooperative mode, and push a Frame on the // frame chain to protect the managed frames it hijacked from before it goes to preemptive mode. virtual void Hijack( VMPTR_Thread vmThread, ULONG32 dwThreadId, const EXCEPTION_RECORD * pRecord, T_CONTEXT * pOriginalContext, ULONG32 cbSizeContext, EHijackReason::EHijackReason reason, void * pUserData, CORDB_ADDRESS * pRemoteContextAddr) = 0; // // Callback function for connection enumeration. // // Arguments: // id - the connection ID. // pName - the name of the connection. // pUserData - user data supplied to EnumerateConnections typedef void (*FP_CONNECTION_CALLBACK)(DWORD id, LPCWSTR pName, CALLBACK_DATA pUserData); // // Enumerate all the Connections in the process. // // Arguments: // fpCallback - callback to invoke for each connection // pUserData - random user data to pass to callback. // // Notes: // This enumerates all the connections. The host notifies the debugger of Connections // via the ICLRDebugManager interface. // ICorDebug has no interest in connections. It's merely the transport between the host and the debugger. // Ideally, that transport would be more general. // // V2 Attach would provide faked up CreateConnection, ChangeConnection events on attach. // This enumeration ability allows V3 to emulate that behavior. // // // Enumerate all threads in the target. // // Arguments: // fpCallback - callback function to invoke on each thread. // pUserData - arbitrary user data supplied to each callback. // // Notes: // This enumerates the ThreadStore in the target, which is all the Thread* objects. // This includes threads that have entered the runtime. This may include threads // even before that thread has executed IL and after that thread no longer has managed // code on its stack. // Callback invoked for each thread. typedef void (*FP_THREAD_ENUMERATION_CALLBACK)(VMPTR_Thread vmThread, CALLBACK_DATA pUserData); virtual void EnumerateThreads(FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0; // Check if the thread is dead // // Arguments: // vmThread - valid thread to check if it's dead. // // Returns: true if the thread is "dead", which means it can never call managed code again.
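// Example (hypothetical sketch): a thread-enumeration callback that skips dead
// threads; "GetDacDbi" is an assumed helper that recovers the interface pointer
// from the user data:
//
//     void MyThreadCallback(VMPTR_Thread vmThread, CALLBACK_DATA pUserData)
//     {
//         IDacDbiInterface * pDacDbi = GetDacDbi(pUserData); // assumed helper
//         if (pDacDbi->IsThreadMarkedDead(vmThread))
//         {
//             return; // will never run managed code again; skip it
//         }
//         // ... inspect the live thread ...
//     }
//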
// // Notes: // #IsThreadMarkedDead // Thread shutdown states are: // 1) Thread is running managed code normally. Thread eventually exits all managed code and // gets to a point where it will never call managed code again. // 2) Thread is marked as dead. // - For threads created outside of the runtime (such as a native thread that wanders into // managed code), this mark can happen in DllMain(ThreadDetach) // - For threads created by the runtime (eg, System.Threading.Thread.Start), this may be done // at the top of the thread's stack after it calls the user's Thread-Proc. // 3) MAYBE Native thread exits at this point (or it may not). This would be the common case // for threads created outside the runtime. // 4) Thread exit event is sent. // - For threads created by the runtime, this may be sent at the top of the thread's // stack (or even when we know that the thread will never execute managed code again) // - For threads created outside the runtime, this is more difficult. A thread can // call into managed code and then return, and then call back into managed code at a // later time (the finalizer does this!). So it's not clear when the native thread // actually exits and will never call managed code again. The only hook we have for // this is DllMain(Thread-Detach). We can mark bits in DllMain, but we can't send // debugger notifications (too dangerous from such a restricted context). // So we may mark the thread as dead, but then sweep later (perhaps on the finalizer // thread), and thus send the Exit events later. // 5) Native thread may exit at this point. This is the common case for threads created by // the runtime. // // The underlying native thread may have exited at either #3 or #5. Because of this // flexibility, we don't want to rely on native thread exit events. // This function checks if a Thread is past state #2 (marked as dead). The key invariant // is that once a thread is marked as dead: // - it can never call managed code again. // - it should not be discoverable by DacDbi enumerations. // // DBI should prefer relying on IsThreadMarkedDead rather than event notifications (either // managed or native) because tracking events requires that DBI maintain state, which means // that attach + dump cases may break. For example, we want a full dump at the ExitThread // event to have the same view as a live process at the ExitThread event. // // We avoid relying on the native thread exit notifications because: // - that's a specific feature of the Win32 debugging API that may not be available on other platforms. // - the only native events the pipeline gets are Exceptions. // // Whether a thread is dead can be inferred from the ICorDebug API. However, we have this // on DacDbi to ensure that this definition is consistent with the other DacDbi methods, // especially the enumeration and discovery rules. virtual bool IsThreadMarkedDead(VMPTR_Thread vmThread) = 0; // // Return the handle of the specified thread. // // Arguments: // vmThread - the specified thread // // Return Value: // the handle of the specified thread // // @dbgtodo- this should go away in V3. This is useless on a dump. virtual HANDLE GetThreadHandle(VMPTR_Thread vmThread) = 0; // // Return the object handle for the managed Thread object corresponding to the specified thread. // // Arguments: // vmThread - the specified thread // // Return Value: // This function returns the object handle for the managed Thread object corresponding to the // specified thread.
The return value may be NULL if a managed Thread object has not been created // for the specified thread yet. // virtual VMPTR_OBJECTHANDLE GetThreadObject(VMPTR_Thread vmThread) = 0; // // Get the allocation info corresponding to the specified thread. // // Arguments: // vmThread - the specified thread // threadAllocInfo - the allocated bytes from SOH and UOH so far on this thread // virtual void GetThreadAllocInfo(VMPTR_Thread vmThread, DacThreadAllocInfo* threadAllocInfo) = 0; // // Set and reset the TSNC_DebuggerUserSuspend bit on the state of the specified thread // according to the CorDebugThreadState. // // Arguments: // vmThread - the specified thread // debugState - the desired CorDebugThreadState // virtual void SetDebugState(VMPTR_Thread vmThread, CorDebugThreadState debugState) = 0; // // Returns TRUE if this thread has an unhandled exception // // Arguments: // vmThread - the thread to query // // Return Value: // TRUE iff this thread has an unhandled exception // virtual BOOL HasUnhandledException(VMPTR_Thread vmThread) = 0; // // Return the user state of the specified thread. Most of the state is derived from // the ThreadState of the specified thread, e.g. TS_Background, TS_Unstarted, etc. // The exception is USER_UNSAFE_POINT, which requires a one-frame stackwalk to figure out. // // Arguments: // vmThread - the specified thread // // Return Value: // the user state of the specified thread // virtual CorDebugUserState GetUserState(VMPTR_Thread vmThread) = 0; // // Returns most of the user state of the specified thread, // i.e. flags which can be derived from the ThreadState: // USER_STOP_REQUESTED, USER_SUSPEND_REQUESTED, USER_BACKGROUND, USER_UNSTARTED // USER_STOPPED, USER_WAIT_SLEEP_JOIN, USER_SUSPENDED, USER_THREADPOOL // // Only USER_UNSAFE_POINT is always set to 0, since it requires an additional stackwalk. // If you need USER_UNSAFE_POINT, use GetUserState(VMPTR_Thread); // // Arguments: // vmThread - the specified thread // // Return Value: // the user state of the specified thread // virtual CorDebugUserState GetPartialUserState(VMPTR_Thread vmThread) = 0; // // Return the connection ID of the specified thread. // // Arguments: // vmThread - the specified thread // // Return Value: // the connection ID of the specified thread // virtual CONNID GetConnectionID(VMPTR_Thread vmThread) = 0; // // Return the task ID of the specified thread. // // Arguments: // vmThread - the specified thread // // Return Value: // the task ID of the specified thread // virtual TASKID GetTaskID(VMPTR_Thread vmThread) = 0; // // Return the OS thread ID of the specified thread // // Arguments: // vmThread - the specified thread; cannot be NULL // // Return Value: // the OS thread ID of the specified thread. Returns 0 if not scheduled. // virtual DWORD TryGetVolatileOSThreadID(VMPTR_Thread vmThread) = 0; // // Return the unique thread ID of the specified thread. The value used for the thread ID changes // depending on whether the runtime is being hosted. In non-hosted scenarios, a managed thread will // always be associated with the same native thread, and so we can use the OS thread ID as the thread ID // for the managed thread. In hosted scenarios, however, a managed thread may run on multiple native // threads. It may not even have a backing native thread if it's switched out. Therefore, we can't use // the OS thread ID as the thread ID. Instead, we use the internal managed thread ID.
// // Arguments: // vmThread - the specified thread; cannot be NULL // // Return Value: // Returns a stable and unique thread ID for the lifetime of the specified managed thread. // virtual DWORD GetUniqueThreadID(VMPTR_Thread vmThread) = 0; // // Return the object handle to the managed Exception object of the current exception // on the specified thread. The return value could be NULL if there is no current exception. // // Arguments: // vmThread - the specified thread // // Return Value: // This function returns the object handle to the managed Exception object of the current exception. // The return value may be NULL if there is no exception being processed, or if the specified thread // is an unmanaged thread which has entered and exited the runtime. // virtual VMPTR_OBJECTHANDLE GetCurrentException(VMPTR_Thread vmThread) = 0; // // Return the object handle to the managed object for a given CCW pointer. // // Arguments: // ccwPtr - the specified ccw pointer // // Return Value: // This function returns the object handle to the managed object for a given CCW pointer. // virtual VMPTR_OBJECTHANDLE GetObjectForCCW(CORDB_ADDRESS ccwPtr) = 0; // // Return the object handle to the managed CustomNotification object of the current notification // on the specified thread. The return value could be NULL if there is no current notification. // // Arguments: // vmThread - the specified thread on which the notification occurred // // Return Value: // This function returns the object handle to the managed CustomNotification object of the current notification. // The return value may be NULL if there is no current notification. // virtual VMPTR_OBJECTHANDLE GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread) = 0; // // Return the current appdomain the specified thread is in. // // Arguments: // vmThread - the specified thread // // Return Value: // the current appdomain of the specified thread // // Notes: // This function throws if the current appdomain is NULL for whatever reason. // virtual VMPTR_AppDomain GetCurrentAppDomain(VMPTR_Thread vmThread) = 0; // // Resolve an assembly // // Arguments: // vmScope - module containing metadata that the token is scoped to. // tkAssemblyRef - assembly ref token to lookup. // // Returns: // Assembly that the loader/fusion has bound to the given assembly ref. // Returns NULL if the assembly has not yet been loaded (a common case). // Throws on error. // // Notes: // A single module has metadata that specifies references via tokens. The // loader/fusion goes through tremendous and random policy hoops to determine // which specific file actually gets bound to the reference. This policy includes // things like config files, registry settings, and many other knobs. // // The debugger can't duplicate this policy with 100% accuracy, and // so we need DAC to lookup the assembly that was actually loaded. virtual VMPTR_DomainAssembly ResolveAssembly(VMPTR_DomainAssembly vmScope, mdToken tkAssemblyRef) = 0; //----------------------------------------------------------------------------- // Interface for initializing the native/IL sequence points and native var info // for a function. 
// Arguments: // input: // vmMethodDesc MethodDesc of the function // startAddress starting address of the function--this serves to // differentiate various EnC versions of the function // fCodeAvailable indicates whether native code is available for the function // output: // pNativeVarData space for the native code offset information for locals // pSequencePoints space for the IL/native sequence points // Return value: // none, but may throw an exception // Assumptions: // vmMethodDesc, pNativeVarData and pSequencePoints are non-NULL // Notes: //----------------------------------------------------------------------------- virtual void GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS startAddress, BOOL fCodeAvailable, OUT NativeVarData * pNativeVarData, OUT SequencePoints * pSequencePoints) = 0; // // Return the filter CONTEXT on the LS. Once we move entirely over to the new managed pipeline // built on top of the Win32 debugging API, this won't be necessary. // // Arguments: // vmThread - the specified thread // // Return Value: // the filter CONTEXT of the specified thread // // Notes: // This function should go away when everything is moved OOP and // we don't have a filter CONTEXT on the LS anymore. // virtual VMPTR_CONTEXT GetManagedStoppedContext(VMPTR_Thread vmThread) = 0; typedef enum { kInvalid, kManagedStackFrame, kExplicitFrame, kNativeStackFrame, kNativeRuntimeUnwindableStackFrame, kAtEndOfStack, } FrameType; // The stackwalker functions allocate persistent state within DDImpl. Clients can hold onto // this via an opaque StackWalkHandle. typedef void* * StackWalkHandle; // // Create a stackwalker on the specified thread and return a handle to it. // Initially, the stackwalker is at the filter CONTEXT if there is one. // Otherwise it is at the leaf CONTEXT. It DOES NOT fast forward to the first frame of interest. // // Arguments: // vmThread - the specified thread // pInternalContextBuffer - a CONTEXT buffer for the stackwalker to work with // ppSFIHandle - out parameter; return a handle to the stackwalker // // Notes: // Call DeleteStackWalk() to delete the stackwalk buffer. // This is a special case that violates the 'no state' tenet. // virtual void CreateStackWalk(VMPTR_Thread vmThread, DT_CONTEXT * pInternalContextBuffer, OUT StackWalkHandle * ppSFIHandle) = 0; // Delete the stackwalk object created from CreateStackWalk. virtual void DeleteStackWalk(StackWalkHandle ppSFIHandle) = 0; // // Get the CONTEXT of the current frame where the stackwalker is stopped. // // Arguments: // pSFIHandle - the handle to the stackwalker // pContext - OUT: the CONTEXT to be filled out. The context control flags are ignored. // virtual void GetStackWalkCurrentContext(StackWalkHandle pSFIHandle, DT_CONTEXT * pContext) = 0; // // Set the stackwalker to the given CONTEXT. The CorDebugSetContextFlag indicates whether // the CONTEXT is "active", meaning that the IP points at the current instruction, // not the return address of some function call. // // Arguments: // vmThread - the current thread // pSFIHandle - the handle to the stackwalker // flag - flag to indicate whether the specified CONTEXT is "active" // pContext - the specified CONTEXT. This may make correctional adjustments to the context's IP. // virtual void SetStackWalkCurrentContext(VMPTR_Thread vmThread, StackWalkHandle pSFIHandle, CorDebugSetContextFlag flag, DT_CONTEXT * pContext) = 0; // // Unwind the stackwalker to the next frame.
The next frame could be any actual stack frame, // explicit frame, native marker frame, etc. Call GetStackWalkCurrentFrameInfo() to find out // more about the frame. // // Arguments: // pSFIHandle - the handle to the stackwalker // // Return Value: // Return TRUE if we successfully unwind to the next frame. // Return FALSE if there are no more frames to walk. // Throw on error. // virtual BOOL UnwindStackWalkFrame(StackWalkHandle pSFIHandle) = 0; // // Check whether the specified CONTEXT is valid. The only check we perform right now is whether the // SP in the specified CONTEXT is in the stack range of the thread. // // Arguments: // vmThread - the specified thread // pContext - the CONTEXT to be checked // // Return Value: // Return S_OK if the CONTEXT passes our checks. // Returns CORDBG_E_NON_MATCHING_CONTEXT if the SP in the specified CONTEXT doesn't fall in the stack // range of the thread. // Throws on error. // virtual HRESULT CheckContext(VMPTR_Thread vmThread, const DT_CONTEXT * pContext) = 0; // // Fill in the DebuggerIPCE_STRData structure with information about the current frame // where the stackwalker is stopped. // // Arguments: // pSFIHandle - the handle to the stackwalker // pFrameData - the DebuggerIPCE_STRData to be filled out; // it can be NULL if you just want to know the frame type // // Return Value: // Return the type of the current frame // virtual FrameType GetStackWalkCurrentFrameInfo(StackWalkHandle pSFIHandle, OPTIONAL DebuggerIPCE_STRData * pFrameData) = 0; // // Return the number of internal frames on the specified thread. // // Arguments: // vmThread - the thread whose internal frames are being retrieved // // Return Value: // Return the number of internal frames. // // Notes: // Explicit frames are "marker objects" the runtime pushes on the stack to mark special places, e.g. // appdomain transition, managed-to-unmanaged transition, etc. Internal frames are only a subset of // explicit frames. Explicit frames which are not interesting to the debugger are not exposed (e.g. // GCFrame). Internal frames are interesting to the debugger if they have a CorDebugInternalFrameType // other than STUBFRAME_NONE. // // The user should call this function before code:IDacDbiInterface::EnumerateInternalFrames to figure // out how many interesting internal frames there are. // virtual ULONG32 GetCountOfInternalFrames(VMPTR_Thread vmThread) = 0; // // Enumerate the internal frames on the specified thread and invoke the provided callback on each of // them. Information about the internal frame is stored in the DebuggerIPCE_STRData. // // Arguments: // vmThread - the thread to be walked // fpCallback - callback function invoked on each internal frame // pUserData - user-specified custom data // // Notes: // The user can call code:IDacDbiInterface::GetCountOfInternalFrames to figure out how many internal // frames are on the thread before calling this function. Also, refer to the comment of that function // to find out more about internal frames. // typedef void (*FP_INTERNAL_FRAME_ENUMERATION_CALLBACK)(const DebuggerIPCE_STRData * pFrameData, CALLBACK_DATA pUserData); virtual void EnumerateInternalFrames(VMPTR_Thread vmThread, FP_INTERNAL_FRAME_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0; // // Given the FramePointer of the parent frame and the FramePointer of the current frame, // check if the current frame is the parent frame. fpParent should have been returned // previously by the DacDbiInterface via GetStackWalkCurrentFrameInfo().
// // Arguments: // fpToCheck - the FramePointer of the current frame // fpParent - the FramePointer of the parent frame; should have been returned earlier by the DDI // // Return Value: // Return TRUE if the current frame is indeed the parent frame // // Note: // Because of the complexity involved in checking for the parent frame, we should always // ask the ExceptionTracker to do it. // virtual BOOL IsMatchingParentFrame(FramePointer fpToCheck, FramePointer fpParent) = 0; // // Return the stack parameter size of a given method. This is necessary on x86 for unwinding. // // Arguments: // controlPC - any address in the specified method; you can use the current PC of the stack frame // // Return Value: // Return the size of the stack parameters of the given method. // Return 0 for vararg methods. // // Assumptions: // The callee stack parameter size is constant throughout a method. // virtual ULONG32 GetStackParameterSize(CORDB_ADDRESS controlPC) = 0; // // Return the FramePointer of the current frame where the stackwalker is stopped at. // // Arguments: // pSFIHandle - the handle to the stackwalker // // Return Value: // the FramePointer of the current frame // // Notes: // The FramePointer of a stack frame is: // the stack address of the return address on x86, // the current SP on AMD64, // // On x86, to get the stack address of the return address, we need to unwind one more frame // and use the SP of the caller frame as the FramePointer of the callee frame. This // function does NOT do that. It just returns the SP. The caller needs to handle the // unwinding. // // The FramePointer of an explicit frame is just the stack address of the explicit frame. // virtual FramePointer GetFramePointer(StackWalkHandle pSFIHandle) = 0; // // Check whether the specified CONTEXT is the CONTEXT of the leaf frame. This function doesn't care // whether the leaf frame is native or managed. // // Arguments: // vmThread - the specified thread // pContext - the CONTEXT to check // // Return Value: // Return TRUE if the specified CONTEXT is the leaf CONTEXT. // // Notes: // Currently we check the specified CONTEXT against the filter CONTEXT first. // This will be deprecated in V3. // virtual BOOL IsLeafFrame(VMPTR_Thread vmThread, const DT_CONTEXT * pContext) = 0; // Get the context for a particular thread of the target process. // Arguments: // input: vmThread - the thread for which the context is required // output: pContextBuffer - the address of the CONTEXT to be initialized. // The memory for this belongs to the caller. It must not be NULL. // Note: throws virtual void GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer) = 0; // // This is a simple helper function to convert a CONTEXT to a DebuggerREGDISPLAY. We need to do this // inside DDI because the RS has no notion of REGDISPLAY. // // Arguments: // pInContext - the CONTEXT to be converted // pOutDRD - the converted DebuggerREGDISPLAY // fActive - Indicate whether the CONTEXT is active or not. An active CONTEXT means that the // IP is the next instruction to be executed, not the return address of a function call. // The opposite of an active CONTEXT is an unwind CONTEXT, which is obtained from // unwinding. // virtual void ConvertContextToDebuggerRegDisplay(const DT_CONTEXT * pInContext, DebuggerREGDISPLAY * pOutDRD, BOOL fActive) = 0; typedef enum { kNone, kILStub, kLCGMethod, } DynamicMethodType; // // Check whether the specified method is an IL stub or an LCG method. 
This answer determines if we // need to expose the method in a V2-style stackwalk. // // Arguments: // vmMethodDesc - the method to be checked // // Return Value: // Return kNone if the method is neither an IL stub or an LCG method. // Return kILStub if the method is an IL stub. // Return kLCGMethod if the method is an LCG method. // virtual DynamicMethodType IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc) = 0; // // Return a TargetBuffer for the raw vararg signature. // Also return the address of the first argument in the vararg signature. // // Arguments: // VASigCookieAddr - the target address of the VASigCookie pointer (double indirection) // pArgBase - out parameter; return the target address of the first word of the arguments // // Return Value: // Return a TargetBuffer for the raw vararg signature. // // Notes: // We can't take a VMPTR here because VASigCookieAddr does not come from the DDI. Instead, // we use the native variable information to figure out which stack slot contains the // VASigCookie pointer. So a remote address is all we have got. // // Ideally we should be able to return just a SigParser, but doing so has a not-so-trivial problem. // The memory used for the signature pointed to by the SigParser cannot be allocated in the DAC cache, // since it'll be used by mscordbi. We don't have a clean way to allocate memory in mscordbi without // breaking the Signature abstraction. // // The other option would be to create a new sub-type like "SignatureCopy" which allocates and frees // its own backing memory. Currently we don't want to share heaps between mscordacwks.dll and // mscordbi.dll, and so we would have to jump through some hoops to allocate with an allocator // in mscordbi.dll. // virtual TargetBuffer GetVarArgSig(CORDB_ADDRESS VASigCookieAddr, OUT CORDB_ADDRESS * pArgBase) = 0; // // Indicates if the specified type requires 8-byte alignment. // // Arguments: // thExact - the exact TypeHandle of the type to query // // Return Value: // TRUE if the type requires 8-byte alignment. // virtual BOOL RequiresAlign8(VMPTR_TypeHandle thExact) = 0; // // Resolve the raw generics token to the real generics type token. The resolution is based on the // given index. See Notes below. // // Arguments: // dwExactGenericArgsTokenIndex - the variable index of the generics type token // rawToken - the raw token to be resolved // // Return Value: // Return the actual generics type token. // // Notes: // DDI tells the RS which variable stores the generics type token, but DDI doesn't retrieve the value // of the variable itself. Instead, the RS retrieves the value of the variable. However, // in some cases, the variable value is not the generics type token. In this case, we need to // "resolve" the variable value to the generics type token. The RS should call this API to do that. // // If the index is 0, then the generics type token is the MethodTable of the "this" object. // rawToken will be the address of the "this" object. // // If the index is TYPECTXT_ILNUM, the generics type token is a secret argument. // It could be a MethodDesc or a MethodTable, and in this case no resolution is actually necessary. // rawToken will be the actual secret argument, and this API really is just a nop. // // However, we don't want the RS to know all this logic. 
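// Example (illustrative sketch; "pDacDbi" is an assumed IDacDbiInterface pointer,
// and rawToken is assumed to have been read by the RS from the variable that the
// DDI identified as holding the generics token):
//
//     GENERICS_TYPE_TOKEN realToken =
//         pDacDbi->ResolveExactGenericArgsToken(dwExactGenericArgsTokenIndex, rawToken);
//     // For index TYPECTXT_ILNUM this is effectively a nop, per the notes above.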
// virtual GENERICS_TYPE_TOKEN ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex, GENERICS_TYPE_TOKEN rawToken) = 0; //----------------------------------------------------------------------------- // Functions to get information about code objects //----------------------------------------------------------------------------- // GetILCodeAndSig returns the function's ILCode and SigToken given // a module and a token. The info will come from a MethodDesc, if // one exists, or from metadata. // // Arguments: // Input: // vmDomainAssembly - module containing metadata for the method // functionToken - metadata token for the function // Output (required): // pCodeInfo - start address and size of the IL // pLocalSigToken - signature token for the method virtual void GetILCodeAndSig(VMPTR_DomainAssembly vmDomainAssembly, mdToken functionToken, OUT TargetBuffer * pCodeInfo, OUT mdToken * pLocalSigToken) = 0; // Gets information about a native code blob: // its method desc, whether it's an instantiated generic, its EnC version number // and hot and cold region information. // Arguments: // Input: // vmDomainAssembly - module containing metadata for the method // functionToken - token for the function for which we need code info // Output (required): // pCodeInfo - data structure describing the native code regions. // Notes: If the function is unjitted, the method desc will be NULL and the // output parameter will be invalid. In general, if the native start address // is unavailable for any reason, the output parameter will also be // invalid (i.e., pCodeInfo->IsValid is false). virtual void GetNativeCodeInfo(VMPTR_DomainAssembly vmDomainAssembly, mdToken functionToken, OUT NativeCodeFunctionData * pCodeInfo) = 0; // Gets information about a native code blob: // its method desc, whether it's an instantiated generic, its EnC version number // and hot and cold region information. // This is similar to the function above, but works from a different starting point. // Also this version can get info for any particular EnC version instance // because they all have different start addresses whereas the above version gets // the most recent one // Arguments: // Input: // vmMethodDesc - the method desc for the function // hotCodeStartAddr - the beginning of the hot code region // Output (required): // pCodeInfo - data structure describing the native code regions.
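// Example (illustrative sketch; "pDacDbi" and the hot-code start address are assumed):
//
//     NativeCodeFunctionData codeInfo;
//     pDacDbi->GetNativeCodeInfoForAddr(vmMethodDesc, hotCodeStartAddr, &codeInfo);
//     // If the native start address was unavailable, the output is invalid
//     // (see the IsValid note above) and must not be used.
//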
virtual void GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS hotCodeStartAddr, NativeCodeFunctionData * pCodeInfo) = 0; //----------------------------------------------------------------------------- // Functions to get information about types //----------------------------------------------------------------------------- // Determine if a type is a ValueType // // Arguments: // input: vmTypeHandle - the type being checked (works even on unrestored types) // // Return: // TRUE iff the type is a ValueType virtual BOOL IsValueType (VMPTR_TypeHandle th) = 0; // Determine if a type has generic parameters // // Arguments: // input: vmTypeHandle - the type being checked (works even on unrestored types) // // Return: // TRUE iff the type has generic parameters virtual BOOL HasTypeParams (VMPTR_TypeHandle th) = 0; // Get type information for a class // // Arguments: // input: vmAppDomain - appdomain where we will fetch field data for the type // thExact - exact type handle for type // output: // pData - structure containing information about the class and its // fields virtual void GetClassInfo (VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle thExact, ClassInfo * pData) = 0; // get field information and object size for an instantiated generic // // Arguments: // input: vmDomainAssembly - module containing metadata for the type // thExact - exact type handle for type (may be NULL) // thApprox - approximate type handle for the type // output: // pFieldList - array of structures containing information about the fields. Clears any previous // contents. Allocated and initialized by this function. // pObjectSize - size of the instantiated object // virtual void GetInstantiationFieldInfo (VMPTR_DomainAssembly vmDomainAssembly, VMPTR_TypeHandle vmThExact, VMPTR_TypeHandle vmThApprox, OUT DacDbiArrayList<FieldData> * pFieldList, OUT SIZE_T * pObjectSize) = 0; // use a type handle to get the information needed to create the corresponding RS CordbType instance // // Arguments: // input: boxed - indicates what, if anything, is boxed. See code:AreValueTypesBoxed for more // specific information // vmAppDomain - module containing metadata for the type // vmTypeHandle - type handle for the type // output: pTypeInfo - holds information needed to build the corresponding CordbType // virtual void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0; virtual void GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, CORDB_ADDRESS addr, OUT DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0; virtual void GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, COR_TYPEID id, OUT DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0; // Get type handle for a TypeDef token, if one exists. For generics this returns the open type. // Note there is no guarantee the returned handle will be fully restored (in pre-jit scenarios), // only that it exists. 
Later functions that use this type handle should fail if they require // information not yet available at the current restoration level // // Arguments: // input: vmModule - the module scope in which to look up the type def // metadataToken - the type definition to retrieve // // Return value: the type handle if it exists or throws CORDBG_E_CLASS_NOT_LOADED if it isn't loaded // virtual VMPTR_TypeHandle GetTypeHandle(VMPTR_Module vmModule, mdTypeDef metadataToken) = 0; // Get the approximate type handle for an instantiated type. This may be identical to the exact type handle, // but if we have code sharing for generics, it may differ in that it may have canonical type parameters. // This will occur if we have not yet loaded an exact type but we have loaded the canonical form of the // type. // // Arguments: // input: pTypeData - information needed to get the type handle; this includes a list of type parameters // and the number of entries in the list. Allocated and initialized by the caller. // Return value: the approximate type handle // virtual VMPTR_TypeHandle GetApproxTypeHandle(TypeInfoList * pTypeData) = 0; // Get the exact type handle from type data. // Arguments: // input: pTypeData - type information for the type. Includes information about // the top-level type as well as information // about the element type for array types, the referent for // pointer types, or actual parameters for generic class or // valuetypes, as appropriate for the top-level type. // pArgInfo - This is preallocated and initialized by the caller and contains two fields: // genericArgsCount - number of type parameters (these may be actual type parameters // for generics or they may represent the element type or referent // type). // pGenericArgData - list of type parameters // vmTypeHandle - the exact type handle derived from the type information // Return Value: an HRESULT indicating the result of the operation virtual HRESULT GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData, ArgInfoList * pArgInfo, VMPTR_TypeHandle& vmTypeHandle) = 0; // // Retrieve the generic type params for a given MethodDesc. This function is specifically // for stackwalking because it requires the generic type token on the stack. // // Arguments: // vmAppDomain - the appdomain of the MethodDesc // vmMethodDesc - the method in question // genericsToken - the generic type token in the stack frame owned by the method // // pcGenericClassTypeParams - out parameter; returns the number of type parameters for the class // containing the method in question; must not be NULL // pGenericTypeParams - out parameter; returns an array of type parameters and // the count of the total number of type parameters; must not be NULL // // Notes: // The memory for the array is allocated by this function on the Dbi heap. // The caller is responsible for releasing it. // virtual void GetMethodDescParams(VMPTR_AppDomain vmAppDomain, VMPTR_MethodDesc vmMethodDesc, GENERICS_TYPE_TOKEN genericsToken, OUT UINT32 * pcGenericClassTypeParams, OUT TypeParamsList * pGenericTypeParams) = 0; // Get the target field address of a thread local static. // Arguments: // input: vmField - pointer to the field descriptor for the static field // vmRuntimeThread - thread to which the static field belongs. This must // NOT be NULL // Return Value: The target address of the field if the field is allocated. // NULL if the field storage is not yet allocated. // // Note: // Static field storage is lazily allocated, so this may commonly return NULL.
// This is an inspection only method and can not allocate the static storage. // Field storage is constant once allocated, so this value can be cached. virtual CORDB_ADDRESS GetThreadStaticAddress(VMPTR_FieldDesc vmField, VMPTR_Thread vmRuntimeThread) = 0; // Get the target field address of a collectible type's static. // Arguments: // input: vmField - pointer to the field descriptor for the static field // vmAppDomain - AppDomain to which the static field belongs. This must // NOT be NULL // Return Value: The target address of the field if the field is allocated. // NULL if the field storage is not yet allocated. // // Note: // Static field storage may not exist yet, so this may commonly return NULL. // This is an inspection only method and can not allocate the static storage. // Field storage is not constant once allocated, so this value can not be cached // across a Continue virtual CORDB_ADDRESS GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField, VMPTR_AppDomain vmAppDomain) = 0; // Get information about a field added with Edit And Continue. // Arguments: // input: pEnCFieldInfo - information about the EnC added field including: // object to which it belongs (if this is null the field is static) // the field token // the class token for the class to which the field was added // the offset to the fields // the domain file // an indication of the type: whether it's a class or value type // output: pFieldData - information about the EnC added field // pfStatic - flag to indicate whether the field is static virtual void GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo, OUT FieldData * pFieldData, OUT BOOL * pfStatic) = 0; // GetTypeHandleParams gets the necessary data for a type handle, i.e. its // type parameters, e.g. "String" and "List<int>" from the type handle // for "Dict<String,List<int>>", and sends it back to the right side. // Arguments: // input: vmAppDomain - app domain to which the type belongs // vmTypeHandle - type handle for the type // output: pParams - list of instances of DebuggerIPCE_ExpandedTypeData, // one for each type parameter. These will be used on the // RS to build up an instantiation which will allow // building an instance of CordbType for the top-level // type. The memory for this list is allocated on the dbi // heap in this function. // This will not fail except for OOM virtual void GetTypeHandleParams(VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, OUT TypeParamsList * pParams) = 0; // GetSimpleType // gets the metadata token and domain file corresponding to a simple type // Arguments: // input: vmAppDomain - Appdomain in which simpleType resides // simpleType - CorElementType value corresponding to a simple type // output: pMetadataToken - the metadata token corresponding to simpleType, // in the scope of vmDomainAssembly. // vmDomainAssembly - the domainAssembly for simpleType // Notes: // This is inspection-only. If the type is not yet loaded, it will throw CORDBG_E_CLASS_NOT_LOADED. // It will not try to load a type. // If the type has been loaded, vmDomainAssembly will be non-null unless the target is somehow corrupted. // In that case, we will throw CORDBG_E_TARGET_INCONSISTENT.
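// Example (illustrative sketch; "pDacDbi" is an assumed IDacDbiInterface pointer):
//
//     mdTypeDef            tkSimple;
//     VMPTR_Module         vmModule;
//     VMPTR_DomainAssembly vmDomainAssembly;
//     pDacDbi->GetSimpleType(vmAppDomain, ELEMENT_TYPE_I4,
//                            &tkSimple, &vmModule, &vmDomainAssembly);
//     // Throws CORDBG_E_CLASS_NOT_LOADED if the type isn't loaded yet.
//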
virtual void GetSimpleType(VMPTR_AppDomain vmAppDomain, CorElementType simpleType, OUT mdTypeDef * pMetadataToken, OUT VMPTR_Module * pVmModule, OUT VMPTR_DomainAssembly * pVmDomainAssembly) = 0; // for the specified object returns TRUE if the object derives from System.Exception virtual BOOL IsExceptionObject(VMPTR_Object vmObject) = 0; // gets the list of raw stack frames for the specified exception object virtual void GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData>& dacStackFrames) = 0; // Returns true if the argument is a runtime callable wrapper virtual BOOL IsRcw(VMPTR_Object vmObject) = 0; // retrieves the list of COM interfaces implemented by vmObject, as it is known at // the time of the call (the list may change as new interface types become available // in the runtime) virtual void GetRcwCachedInterfaceTypes( VMPTR_Object vmObject, VMPTR_AppDomain vmAppDomain, BOOL bIInspectableOnly, OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces) = 0; // retrieves the list of interface pointers implemented by vmObject, as it is known at // the time of the call (the list may change as new interface types become available // in the runtime) virtual void GetRcwCachedInterfacePointers( VMPTR_Object vmObject, BOOL bIInspectableOnly, OUT DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs) = 0; // retrieves a list of interface types corresponding to the passed in // list of IIDs. The interface types are retrieved from an app domain // IID / Type cache that is updated as new types are loaded. Will // have NULL entries corresponding to unknown IIDs in "iids" virtual void GetCachedWinRTTypesForIIDs( VMPTR_AppDomain vmAppDomain, DacDbiArrayList<GUID> & iids, OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes) = 0; // retrieves the whole app domain cache of IID / Type mappings. virtual void GetCachedWinRTTypes( VMPTR_AppDomain vmAppDomain, OUT DacDbiArrayList<GUID> * piids, OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes) = 0; // ---------------------------------------------------------------------------- // functions to get information about reference/handle referents for ICDValue // ---------------------------------------------------------------------------- // Get object information for a TypedByRef object. Initializes the objRef and typedByRefType fields of // pObjectData (type info for the referent). // Arguments: // input: pTypedByRef - pointer to a TypedByRef struct // vmAppDomain - AppDomain for the type of the object referenced // output: pObjectData - information about the object referenced by pTypedByRef // Note: Throws virtual void GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData) = 0; // Get the string length and offset to string base for a string object // Arguments: // input: objectAddress - address of a string object // output: pObjectData - fills in the string fields stringInfo.offsetToStringBase and // stringInfo.length // Note: throws virtual void GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData) = 0; // Get information for an array type referent of an objRef, including rank, upper and lower bounds, // element size and type, and the number of elements.
// Arguments: // input: objectAddress - the address of an array object // output: pObjectData - fills in the array-related fields: // arrayInfo.offsetToArrayBase, // arrayInfo.offsetToLowerBounds, // arrayInfo.offsetToUpperBounds, // arrayInfo.componentCount, // arrayInfo.rank, // arrayInfo.elementSize, // Note: throws virtual void GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData) = 0; // Get information about an object for which we have a reference, including the object size and // type information. // Arguments: // input: objectAddress - address of the object for which we want information // type - the basic type of the object (we may find more specific type // information for the object) // vmAppDomain - the appdomain to which the object belongs // output: pObjectData - fills in the size and type information fields // Note: throws virtual void GetBasicObjectInfo(CORDB_ADDRESS objectAddress, CorElementType type, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData) = 0; // -------------------------------------------------------------------------------------------- #ifdef TEST_DATA_CONSISTENCY // Determine whether a crst is held by the left side. When the DAC is executing VM code that takes a // lock, we want to know whether the LS already holds that lock. If it does, we will assume the locked // data is in an inconsistent state and will throw an exception, rather than relying on this data. This // function is part of a self-test that will ensure we are correctly detecting when the LS holds a lock // on data the RS is trying to inspect. // Argument: // input: vmCrst - the lock to test // output: none // Notes: // Throws // For this code to run, the environment variable TestDataConsistency must be set to 1. virtual void TestCrst(VMPTR_Crst vmCrst) = 0; // Determine whether a SimpleRWLock is held by the left side. When the DAC is executing VM code that takes a // lock, we want to know whether the LS already holds that lock. If it does, we will assume the locked // data is in an inconsistent state and will throw an exception, rather than relying on this data. This // function is part of a self-test that will ensure we are correctly detecting when the LS holds a lock // on data the RS is trying to inspect. // Argument: // input: vmRWLock - the lock to test // output: none // Notes: // Throws // For this code to run, the environment variable TestDataConsistency must be set to 1. virtual void TestRWLock(VMPTR_SimpleRWLock vmRWLock) = 0; #endif // -------------------------------------------------------------------------------------------- // Get the address of the Debugger control block on the helper thread. The debugger control block // contains information about the status of the debugger, handles to various events and space to hold // information sent back and forth between the debugger and the debuggee's helper thread. // Arguments: none // Return Value: The remote address of the Debugger control block allocated on the helper thread // if it has been successfully allocated or NULL otherwise. virtual CORDB_ADDRESS GetDebuggerControlBlockAddress() = 0; // Creates a VMPTR of an Object. The Object is found by dereferencing ptr // as though it is a target address to an OBJECTREF. This is similar to // GetObject with another level of indirection. // // Arguments: // ptr - A target address pointing to an OBJECTREF // // Return Value: // A VMPTR to the Object which ptr points to // // Notes: // The VMPTR this produces can be deconstructed by GetObjectContents.
    // Creates a VMPTR of an Object. The Object is found by dereferencing ptr
    // as though it is a target address to an OBJECTREF. This is similar to
    // GetObject with another level of indirection.
    //
    // Arguments:
    //     ptr - A target address pointing to an OBJECTREF
    //
    // Return Value:
    //     A VMPTR to the Object which ptr points to
    //
    // Notes:
    //     The VMPTR this produces can be deconstructed by GetObjectContents.
    //     This function will throw if given a NULL or otherwise invalid pointer,
    //     but if given a valid address to an invalid pointer, it will produce
    //     a VMPTR_Object which points to invalid memory.
    virtual VMPTR_Object GetObjectFromRefPtr(CORDB_ADDRESS ptr) = 0;

    // Creates a VMPTR of an Object. The Object is assumed to be at the target
    // address supplied by ptr
    //
    // Arguments:
    //     ptr - A target address to an Object
    //
    // Return Value:
    //     A VMPTR to the Object which was at ptr
    //
    // Notes:
    //     The VMPTR this produces can be deconstructed by GetObjectContents.
    //     This will produce a VMPTR_Object regardless of whether the pointer is
    //     valid or not.
    virtual VMPTR_Object GetObject(CORDB_ADDRESS ptr) = 0;

    // Sets state in the native binder.
    //
    // Arguments:
    //     ePolicy - the NGEN policy to change
    //
    // Return Value:
    //     HRESULT indicating if the state was successfully updated
    //
    virtual HRESULT EnableNGENPolicy(CorDebugNGENPolicy ePolicy) = 0;

    // Sets the NGEN compiler flags. This restricts NGEN to only use images with certain
    // types of pregenerated code. With respect to debugging this is used to specify that
    // the NGEN image must be debuggable, aka non-optimized code. Note that these flags
    // are merged with other sources of configuration so it is possible that the final
    // result retrieved from GetNGENCompilerFlags does not match what was specified
    // in this call.
    //
    // If an NGEN image of the appropriate type isn't available then one of two things happens:
    //   a) the NGEN image isn't loaded and CLR loads the MSIL image instead
    //   b) the NGEN image is loaded, but we don't use the pregenerated code it contains
    //      and instead use only the MSIL and metadata
    //
    // This function is only legal to call at app startup before any decisions have been
    // made about NGEN image loading. Once we begin loading, this configuration is immutable.
    //
    // Arguments:
    //     dwFlags - the new NGEN compiler flags that should go into effect
    //
    // Return Value:
    //     HRESULT indicating if the state was successfully updated. On error the
    //     current flags in effect will not have changed.
    //
    virtual HRESULT SetNGENCompilerFlags(DWORD dwFlags) = 0;

    // Gets the NGEN compiler flags currently in effect. This accounts for settings that
    // were caused by SetNGENCompilerFlags as well as other configuration sources.
    // See SetNGENCompilerFlags for more info.
    //
    // Arguments:
    //     pdwFlags - the NGEN compiler flags currently in effect
    //
    // Return Value:
    //     HRESULT indicating if the state was successfully retrieved.
    //
    virtual HRESULT GetNGENCompilerFlags(DWORD *pdwFlags) = 0;

    // Create a VMPTR_OBJECTHANDLE from a CORDB_ADDRESS pointing to an object handle
    //
    // Arguments:
    //     handle: target address of a GC handle
    //
    // ReturnValue:
    //     returns a VMPTR_OBJECTHANDLE with the handle as the m_addr field
    //
    // Notes:
    //     This will produce a VMPTR_OBJECTHANDLE regardless of whether handle is
    //     valid.
    //     Ideally we'd be using only strongly-typed variables on the RS, and then this would be unnecessary.
    virtual VMPTR_OBJECTHANDLE GetVmObjectHandle(CORDB_ADDRESS handleAddress) = 0;

    // Validate that the VMPTR_OBJECTHANDLE refers to a legitimate managed object
    //
    // Arguments:
    //     handle: the GC handle to be validated
    //
    // Return value:
    //     TRUE if the object appears to be valid (it's a heuristic), FALSE if it definitely is not valid
    //
    virtual BOOL IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle) = 0;

    // indicates if the specified module is a WinRT module
    //
    // Arguments:
    //     vmModule: the module to check
    //     isWinRT: out parameter indicating state of module
    //
    // Return value:
    //     S_OK indicating that the operation succeeded
    //
    virtual HRESULT IsWinRTModule(VMPTR_Module vmModule, BOOL& isWinRT) = 0;

    // Determines the app domain id for the object referred to by a given VMPTR_OBJECTHANDLE
    //
    // Arguments:
    //     handle: the GC handle which refers to the object of interest
    //
    // Return value:
    //     The app domain id of the object of interest
    //
    // This may throw if the object handle is corrupt (it doesn't refer to a managed object)
    virtual ULONG GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle) = 0;

    // Get the target address from a VMPTR_OBJECTHANDLE, i.e., the handle address
    // Arguments:
    //     vmHandle - (input) the VMPTR_OBJECTHANDLE from which we need the target address
    // Return value: the target address from the VMPTR_OBJECTHANDLE
    //
    virtual CORDB_ADDRESS GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle) = 0;

    // Given a VMPTR to an Object return the target address
    //
    // Arguments:
    //     obj - the Object VMPTR to get the address from
    //
    // Return Value:
    //     Return the target address which obj is using
    //
    // Notes:
    //     The VMPTR this consumes can be reconstructed using GetObject and
    //     providing the address stored in the returned TargetBuffer. This has
    //     undefined behavior for invalid VMPTR_Objects.
    virtual TargetBuffer GetObjectContents(VMPTR_Object obj) = 0;

    // The callback used to enumerate blocking objects
    typedef void (*FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK)(DacBlockingObject blockingObject,
                                                           CALLBACK_DATA pUserData);

    //
    // Enumerate all monitors blocking a thread
    //
    // Arguments:
    //     vmThread - the thread to get monitor data for
    //     fpCallback - callback to invoke on the blocking data for each monitor
    //     pUserData - user data to supply for each callback.
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    virtual void EnumerateBlockingObjects(VMPTR_Thread vmThread,
                                          FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback,
                                          CALLBACK_DATA pUserData) = 0;

    //
    // Returns the thread which owns the monitor lock on an object and the acquisition
    // count
    //
    // Arguments:
    //     vmObject - The object to check for ownership
    //
    // Return Value:
    //     Throws on error. Inside the structure we have:
    //     pVmThread - the owning thread, or VMPTR_Thread::NullPtr() if unowned
    //     pAcquisitionCount - the number of times the lock would need to be released in
    //                         order for it to be unowned
    //
    virtual MonitorLockInfo GetThreadOwningMonitorLock(VMPTR_Object vmObject) = 0;
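    // Illustrative usage sketch (not part of the interface): enumerating the
    // monitors blocking a thread and querying monitor ownership. 'pDac',
    // 'CollectBlocker', and 'myList' are hypothetical names.
    //
    //   static void CollectBlocker(DacBlockingObject blockingObject, CALLBACK_DATA pUserData)
    //   {
    //       // e.g. append blockingObject to the list passed through pUserData
    //   }
    //
    //   pDac->EnumerateBlockingObjects(vmThread, CollectBlocker, (CALLBACK_DATA) &myList);
    //   MonitorLockInfo lockInfo = pDac->GetThreadOwningMonitorLock(vmObject);
    //   // lockInfo reports the owning thread (if any) and the acquisition count.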
    //
    //
    virtual void EnumerateMonitorEventWaitList(VMPTR_Object vmObject,
                                               FP_THREAD_ENUMERATION_CALLBACK fpCallback,
                                               CALLBACK_DATA pUserData) = 0;

    //
    // Returns the managed debugging flags for the process (a combination
    // of the CLR_DEBUGGING_PROCESS_FLAGS flags). This function specifies,
    // beyond whether or not a managed debug event is pending, also if the
    // event (if one exists) is caused by a Debugger.Launch(). This is
    // important b/c Debugger.Launch calls should *NOT* cause the debugger
    // to terminate the process when the attach is canceled.
    virtual CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags() = 0;

    virtual bool GetMetaDataFileInfoFromPEFile(VMPTR_PEAssembly vmPEAssembly,
                                               DWORD & dwTimeStamp,
                                               DWORD & dwImageSize,
                                               bool & isNGEN,
                                               IStringHolder* pStrFilename) = 0;

    virtual bool GetILImageInfoFromNgenPEFile(VMPTR_PEAssembly vmPEAssembly,
                                              DWORD & dwTimeStamp,
                                              DWORD & dwSize,
                                              IStringHolder* pStrFilename) = 0;

    virtual bool IsThreadSuspendedOrHijacked(VMPTR_Thread vmThread) = 0;

    typedef void* * HeapWalkHandle;

    // Returns true if it is safe to walk the heap. If this function returns false,
    // you could still create a heap walk and attempt to walk it, but there's no
    // telling how much of the heap will be available.
    virtual bool AreGCStructuresValid() = 0;

    // Creates a HeapWalkHandle which can be used to walk the managed heap with the
    // WalkHeap function. Note if this function completes successfully you will need
    // to delete the handle by passing it into DeleteHeapWalk.
    //
    // Arguments:
    //     pHandle - the location to store the heap walk handle in
    //
    // Returns:
    //     S_OK on success, an error code on failure.
    virtual HRESULT CreateHeapWalk(OUT HeapWalkHandle * pHandle) = 0;

    // Deletes the given HeapWalkHandle. Note you must call this function if
    // CreateHeapWalk returns success.
    virtual void DeleteHeapWalk(HeapWalkHandle handle) = 0;

    // Walks the heap using the given heap walk handle, enumerating objects
    // on the managed heap. Note that walking the heap requires that the GC
    // data structures be in a valid state, which you can find out by calling
    // AreGCStructuresValid.
    //
    // Arguments:
    //     handle   - a HeapWalkHandle obtained from CreateHeapWalk
    //     count    - the number of object addresses to obtain; objects must
    //                be at least as large as count
    //     objects  - the location to stuff the object addresses found during
    //                the heap walk; this array should be at least "count" in
    //                length; this field must not be null
    //     pFetched - a location to store the actual number of values filled
    //                into objects; this field must not be null
    //
    // Returns:
    //     S_OK on success, a failure HRESULT otherwise.
    //
    // Note:
    //     You should iteratively call WalkHeap requesting more values until
    //     *pFetched != count. This signifies that we have reached the end
    //     of the heap walk.
    virtual HRESULT WalkHeap(HeapWalkHandle handle,
                             ULONG count,
                             OUT COR_HEAPOBJECT * objects,
                             OUT ULONG * pFetched) = 0;

    virtual HRESULT GetHeapSegments(OUT DacDbiArrayList<COR_SEGMENT> * pSegments) = 0;

    virtual bool IsValidObject(CORDB_ADDRESS obj) = 0;

    virtual bool GetAppDomainForObject(CORDB_ADDRESS obj,
                                       OUT VMPTR_AppDomain * pApp,
                                       OUT VMPTR_Module * pModule,
                                       OUT VMPTR_DomainAssembly * pDomainAssembly) = 0;
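    // Illustrative heap-walk loop (not part of the interface). 'pDac' and the
    // batch size are hypothetical; the termination condition follows the WalkHeap
    // notes above (*pFetched != count signals the end of the walk).
    //
    //   HeapWalkHandle hWalk = NULL;
    //   if (pDac->AreGCStructuresValid() && SUCCEEDED(pDac->CreateHeapWalk(&hWalk)))
    //   {
    //       COR_HEAPOBJECT batch[64];
    //       ULONG fetched = 0;
    //       do
    //       {
    //           if (FAILED(pDac->WalkHeap(hWalk, 64, batch, &fetched)))
    //               break;
    //           // ...consume batch[0 .. fetched)...
    //       } while (fetched == 64);
    //       pDac->DeleteHeapWalk(hWalk);
    //   }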
    // Reference Walking.

    // Creates a reference walk.
    // Parameters:
    //     pHandle - out - the reference walk handle to create
    //     walkStacks - in - whether or not to report stack references
    //     walkFQ - in - whether or not to report references from the finalizer queue
    //     handleWalkMask - in - the types of handles to report (see CorGCReferenceType, cordebug.idl)
    // Returns:
    //     An HRESULT indicating whether it succeeded or failed.
    // Exceptions:
    //     Does not throw, but does not catch exceptions either.
    virtual HRESULT CreateRefWalk(OUT RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask) = 0;

    // Deletes a reference walk.
    // Parameters:
    //     handle - in - the handle of the reference walk to delete
    // Exceptions:
    //     Does not throw, but does not catch exceptions either.
    virtual void DeleteRefWalk(RefWalkHandle handle) = 0;

    // Enumerates GC references in the process based on the parameters passed to CreateRefWalk.
    // Parameters:
    //     handle - in - the RefWalkHandle to enumerate
    //     count - in - the capacity of "refs"
    //     refs - in/out - an array to write the references to
    //     pFetched - out - the number of references written
    virtual HRESULT WalkRefs(RefWalkHandle handle, ULONG count, OUT DacGcReference * refs, OUT ULONG * pFetched) = 0;

    virtual HRESULT GetTypeID(CORDB_ADDRESS obj, COR_TYPEID * pType) = 0;

    virtual HRESULT GetTypeIDForType(VMPTR_TypeHandle vmTypeHandle, COR_TYPEID *pId) = 0;

    virtual HRESULT GetObjectFields(COR_TYPEID id, ULONG32 celt, OUT COR_FIELD * layout, OUT ULONG32 * pceltFetched) = 0;

    virtual HRESULT GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT * pLayout) = 0;

    virtual HRESULT GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT * pLayout) = 0;

    virtual void GetGCHeapInformation(OUT COR_HEAPINFO * pHeapInfo) = 0;

    // If a PEAssembly has an RW capable IMDInternalImport, this returns the address of the MDInternalRW
    // object which implements it.
    //
    // Arguments:
    //     vmPEAssembly - target PEAssembly to get metadata MDInternalRW for.
    //     pAddrMDInternalRW - If a PEAssembly has an RW capable IMDInternalImport, this will be set to the
    //                         address of the MDInternalRW object which implements it. Otherwise it will be NULL.
    //
    virtual HRESULT GetPEFileMDInternalRW(VMPTR_PEAssembly vmPEAssembly, OUT TADDR* pAddrMDInternalRW) = 0;

    // DEPRECATED - use GetActiveRejitILCodeVersionNode
    // Retrieves the active ReJitInfo for a given module/methodDef, if it exists.
    // Active is defined as after GetReJitParameters returns from the profiler dll and
    // no call to Revert has completed yet.
    //
    // Arguments:
    //     vmModule - The module to search in
    //     methodTk - The methodDef token indicates the method within the module to check
    //     pReJitInfo - [out] The RejitInfo request, if any, that is active on this method. If no request
    //                  is active this will be pReJitInfo->IsNull() == TRUE.
    //
    // Returns:
    //     S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pReJitInfo) = 0;
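    // Illustrative reference-walk loop for CreateRefWalk / WalkRefs / DeleteRefWalk
    // above (not part of the interface). 'pDac' and 'handleMask' are hypothetical;
    // the mask is built from CorGCReferenceType values (see cordebug.idl).
    //
    //   RefWalkHandle hRefs = NULL;
    //   if (SUCCEEDED(pDac->CreateRefWalk(&hRefs, TRUE /* stacks */, TRUE /* FQ */, handleMask)))
    //   {
    //       DacGcReference refs[64];
    //       ULONG fetched = 0;
    //       while (SUCCEEDED(pDac->WalkRefs(hRefs, 64, refs, &fetched)) && fetched > 0)
    //       {
    //           // ...consume refs[0 .. fetched)...
    //           if (fetched < 64)
    //               break;
    //       }
    //       pDac->DeleteRefWalk(hRefs);
    //   }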
    // DEPRECATED - use GetNativeCodeVersionNode
    // Retrieves the ReJitInfo for a given MethodDesc/code address, if it exists.
    //
    // Arguments:
    //     vmMethod - The method to look for
    //     codeStartAddress - The code start address disambiguates between multiple rejitted instances
    //                        of the method.
    //     pReJitInfo - [out] The RejitInfo request that corresponds to this MethodDesc/code address, if it
    //                  exists. NULL otherwise.
    //
    // Returns:
    //     S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pReJitInfo) = 0;

    // DEPRECATED - use GetILCodeVersion
    // Retrieves the SharedReJitInfo for a given ReJitInfo.
    //
    // Arguments:
    //     vmReJitInfo - The ReJitInfo to inspect
    //     pSharedReJitInfo - [out] The SharedReJitInfo that is pointed to by vmReJitInfo.
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, VMPTR_SharedReJitInfo* pSharedReJitInfo) = 0;

    // DEPRECATED - use GetILCodeVersionData
    // Retrieves useful data from a SharedReJitInfo such as IL code and IL mapping.
    //
    // Arguments:
    //     sharedReJitInfo - The SharedReJitInfo to inspect
    //     pData - [out] Various properties of the SharedReJitInfo such as IL code and IL mapping.
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetSharedReJitInfoData(VMPTR_SharedReJitInfo sharedReJitInfo, DacSharedReJitInfo* pData) = 0;

    // Retrieves a bit field indicating which defines were in use when clr was built. This only includes
    // defines that are specified in the Debugger::_Target_Defines enumeration, which is a small subset of
    // all defines.
    //
    // Arguments:
    //     pDefines - [out] The set of defines clr.dll was built with. Bit offsets are encoded using the
    //                enumeration Debugger::_Target_Defines
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetDefinesBitField(ULONG32 *pDefines) = 0;

    // Retrieves a version number indicating the shape of the data structures used in the Metadata
    // implementation inside clr.dll. This number changes anytime a datatype layout changes so that
    // they can be correctly deserialized from out of process.
    //
    // Arguments:
    //     pMDStructuresVersion - [out] The layout version number for metadata data structures. See
    //                            Debugger::Debugger() in Debug\ee\Debugger.cpp for a description of the options.
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetMDStructuresVersion(ULONG32* pMDStructuresVersion) = 0;

    // Retrieves the active rejit ILCodeVersionNode for a given module/methodDef, if it exists.
    // Active is defined as after GetReJitParameters returns from the profiler dll and
    // no call to Revert has completed yet.
    //
    // Arguments:
    //     vmModule - The module to search in
    //     methodTk - The methodDef token indicates the method within the module to check
    //     pILCodeVersionNode - [out] The Rejit request, if any, that is active on this method. If no request
    //                          is active this will be pILCodeVersionNode->IsNull() == TRUE.
    //
    // Returns:
    //     S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0;

    // Retrieves the NativeCodeVersionNode for a given MethodDesc/code address, if it exists.
    // NOTE: The initial (default) code generated for a MethodDesc is a valid MethodDesc/code address pair
    // but it won't have a corresponding NativeCodeVersionNode.
    //
    //
    // Arguments:
    //     vmMethod - The method to look for
    //     codeStartAddress - The code start address disambiguates between multiple jitted instances of the method.
    //     pVmNativeCodeVersionNode - [out] The NativeCodeVersionNode request that corresponds to this
    //                                MethodDesc/code address, if it exists. NULL otherwise.
    //
    // Returns:
    //     S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode) = 0;

    // Retrieves the ILCodeVersionNode for a given NativeCodeVersionNode.
    // This may return a NULL node if the native code belongs to the default IL version for this method.
    //
    // Arguments:
    //     vmNativeCodeVersionNode - The NativeCodeVersionNode to inspect
    //     pVmILCodeVersionNode - [out] The ILCodeVersionNode that is pointed to by vmNativeCodeVersionNode, if any.
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0;

    // Retrieves useful data from an ILCodeVersion such as IL code and IL mapping.
    //
    // Arguments:
    //     ilCodeVersionNode - The ILCodeVersionNode to inspect
    //     pData - [out] Various properties of the ILCodeVersionNode such as IL code and IL mapping.
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode ilCodeVersionNode, DacSharedReJitInfo* pData) = 0;

    // Enable or disable the GC notification events. The GC notification events are turned off by default.
    // They will be delivered through ICorDebugManagedCallback4.
    //
    // Arguments:
    //     fEnable - true to enable the events, false to disable
    //
    // Returns:
    //     S_OK if no error
    //     error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
    //
    virtual HRESULT EnableGCNotificationEvents(BOOL fEnable) = 0;

    typedef enum
    {
        kClosedDelegate,
        kOpenDelegate,
        kOpenInstanceVSD,
        kClosedStaticWithScpecialSig,
        kTrueMulticastDelegate,
        kWrapperDelegate,
        kUnmanagedFunctionDelegate,
        kUnknownDelegateType
    } DelegateType;

    // Returns true if the object is a type deriving from System.MulticastDelegate
    //
    // Arguments:
    //     vmObject - pointer to runtime object to query for.
    //
    virtual BOOL IsDelegate(VMPTR_Object vmObject) = 0;

    // Returns the delegate type
    virtual HRESULT GetDelegateType(VMPTR_Object delegateObject, DelegateType *delegateType) = 0;

    virtual HRESULT GetDelegateFunctionData(
        DelegateType delegateType,
        VMPTR_Object delegateObject,
        OUT VMPTR_DomainAssembly *ppFunctionDomainAssembly,
        OUT mdMethodDef *pMethodDef) = 0;

    virtual HRESULT GetDelegateTargetObject(
        DelegateType delegateType,
        VMPTR_Object delegateObject,
        OUT VMPTR_Object *ppTargetObj,
        OUT VMPTR_AppDomain *ppTargetAppDomain) = 0;

    virtual HRESULT GetLoaderHeapMemoryRanges(OUT DacDbiArrayList<COR_MEMORY_RANGE> *pRanges) = 0;

    virtual HRESULT IsModuleMapped(VMPTR_Module pModule, OUT BOOL *isModuleMapped) = 0;

    virtual bool MetadataUpdatesApplied() = 0;

    // The following tag tells the DD-marshalling tool to stop scanning.
    // END_MARSHAL
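    // Illustrative delegate-inspection sketch (not part of the interface). 'pDac'
    // is hypothetical, and which DelegateType values each accessor accepts is not
    // specified here, so treat the flow below as a sketch only.
    //
    //   DelegateType delType = kUnknownDelegateType;
    //   if (pDac->IsDelegate(vmObject) &&
    //       SUCCEEDED(pDac->GetDelegateType(vmObject, &delType)))
    //   {
    //       VMPTR_DomainAssembly vmFuncAssembly;
    //       mdMethodDef funcToken;
    //       pDac->GetDelegateFunctionData(delType, vmObject, &vmFuncAssembly, &funcToken);
    //
    //       VMPTR_Object vmTarget;
    //       VMPTR_AppDomain vmTargetDomain;
    //       pDac->GetDelegateTargetObject(delType, vmObject, &vmTarget, &vmTargetDomain);
    //   }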
    //-----------------------------------------------------------------------------
    // Utility interface used for passing strings out of these APIs. The caller
    // provides an implementation of this that uses whatever memory allocation
    // strategy it desires, and IDacDbiInterface APIs will call AssignCopy in order
    // to pass back the contents of strings.
    //
    // This permits the client and implementation of IDacDbiInterface to be in
    // different DLLs with their own heap allocation mechanism, while avoiding
    // the ugly and verbose 2-call C-style string passing API pattern.
    //-----------------------------------------------------------------------------
    class IStringHolder
    {
    public:
        //
        // Store a copy of the provided string.
        //
        // Arguments:
        //     psz - The null-terminated unicode string to copy.
        //
        // Return Value:
        //     S_OK on success, typical HRESULT return values on failure.
        //
        // Notes:
        //     The underlying object is responsible for allocating and freeing the
        //     memory for this copy. The object must not store the value of psz;
        //     it is no longer valid after this call returns.
        //
        virtual HRESULT AssignCopy(const WCHAR * psz) = 0;
    };

    //-----------------------------------------------------------------------------
    // Interface for allocations
    // This lets DD allocate buffers to pass back to DBI, and thus avoids
    // the common 2-step (query size / allocate / query data) pattern.
    //
    // Note that mscordacwks.dll and clients cannot share the same heap allocator;
    // DAC statically links the CRT to avoid run-time dependencies on non-OS libraries.
    //-----------------------------------------------------------------------------
    class IAllocator
    {
    public:
        // Allocate
        // Expected to throw on error.
        virtual void * Alloc(SIZE_T lenBytes) = 0;

        // Free. This shouldn't throw.
        virtual void Free(void * p) = 0;
    };

    //-----------------------------------------------------------------------------
    // Callback interface to provide Metadata lookup.
    //-----------------------------------------------------------------------------
    class IMetaDataLookup
    {
    public:
        //
        // Lookup a metadata importer via PEAssembly.
        //
        // Returns:
        //     A IMDInternalImport used by dac-ized VM code. The object is NOT addref-ed. See lifespan notes below.
        //     Returns NULL if no importer is available.
        //     Throws on exceptional circumstances (eg, detects the debuggee is corrupted).
        //
        // Notes:
        //     IMDInternalImport is a property of PEAssembly. The DAC-ized code uses it as a weak reference,
        //     and so we avoid doing an AddRef() here because that would mean we need to add Release() calls
        //     in DAC-only paths.
        //     The metadata importers are not DAC-ized, and thus we have a local copy in the host.
        //     If it was dac-ized, then DAC would get the importer just like any other field.
        //
        //     lifespan of returned object:
        //     - DBI owns the metadata importers.
        //     - DBI must not free the importer without calling Flush() on DAC first.
        //     - DAC will only invoke this when in a DD primitive, which was in turn invoked by DBI.
        //     - For performance reasons, we want to allow DAC to cache this between Flush() calls.
        //     - If DAC caches the importer, it will only use it when DBI invokes a DD primitive.
        //     - the reference count of the returned object is not adjusted.
        //
        virtual IMDInternalImport * LookupMetaData(VMPTR_PEAssembly addressPEAssembly, bool &isILMetaDataForNGENImage) = 0;
    };

}; // end IDacDbiInterface

#endif // _DACDBI_INTERFACE_H_
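// Illustrative, non-normative sketch of a DBI-side IStringHolder implementation
// (not part of this header's contract). 'SimpleStringHolder' is a hypothetical
// name; any allocation strategy works as long as the holder owns the copy and
// does not retain the caller's pointer.
//
//   #include <new>      // std::nothrow
//   #include <wchar.h>  // wcslen / wcscpy_s
//
//   class SimpleStringHolder : public IDacDbiInterface::IStringHolder
//   {
//   public:
//       SimpleStringHolder() : m_psz(NULL) {}
//       ~SimpleStringHolder() { delete [] m_psz; }
//
//       virtual HRESULT AssignCopy(const WCHAR * psz)
//       {
//           size_t cch = wcslen(psz) + 1;
//           m_psz = new (std::nothrow) WCHAR[cch];
//           if (m_psz == NULL)
//               return E_OUTOFMEMORY;
//           wcscpy_s(m_psz, cch, psz);   // the copy is owned by this holder
//           return S_OK;
//       }
//
//       WCHAR * m_psz;
//   };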
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// DacDbiInterface.h
//
//
// Define the interface between the DAC and DBI.
//*****************************************************************************

#ifndef _DACDBI_INTERFACE_H_
#define _DACDBI_INTERFACE_H_

#include <metahost.h>

// The DAC/DBI interface can use structures and LSPTR declarations from the
// existing V2 interfaces
#include "dbgipcevents.h"

//-----------------------------------------------------------------------------
// Deallocation function for memory allocated with the global IAllocator object.
//
// Arguments:
//     p - pointer to delete. Allocated with IAllocator::Alloc
//
// Notes:
//     This should invoke the dtor and then call IAllocator::Free.
//     In the DAC implementation, this will call via IAllocator.
//     In the DBI implementation, this can directly call delete (assuming the IAllocator::Free
//     directly called new).
template<class T> void DeleteDbiMemory(T *p);
template<class T> void DeleteDbiArrayMemory(T *p, int count);

// Need a class to serve as a tag that we can use to overload New/Delete.
class forDbiWorker {};
extern forDbiWorker forDbi;
extern void * operator new(size_t lenBytes, const forDbiWorker &);
extern void * operator new[](size_t lenBytes, const forDbiWorker &);
extern void operator delete(void *p, const forDbiWorker &);
extern void operator delete[](void *p, const forDbiWorker &);

// The dac exposes a way to walk all GC references in the process. This
// includes both strong references and weak references. This is done
// through a reference walk.
typedef void* * RefWalkHandle;

#include "dacdbistructures.h"

// This is the current format of code:DbiVersion. It needs to be rev'ed when we decide to store something
// else other than the product version of the DBI in DbiVersion (e.g. a timestamp). See
// code:CordbProcess::CordbProcess#DBIVersionChecking for more information.
const DWORD kCurrentDbiVersionFormat = 1;

//-----------------------------------------------------------------------------
// This is a low-level interface between DAC and DBI.
// The DAC is the raw DAC-ized code from the EE.
// DBI is the implementation of ICorDebug on top of that.
//
// This interface should be:
//  - Stateless: The DAC component should not have any persistent state. It should not have any resources
//      that it needs to clean up. DBI can store all the state (eg, list of modules).
//      Using IAllocator/IStringHolder interfaces to allocate data to pass back out is ok because DBI owns
//      the resources, not the DAC layer.
//  - blittable: The types on the interface should be blittable. For example, use TIDs instead of OS Thread handles.
//      Passing pointers to be used as out-parameters is ok.
//  - lightweight: it will inevitably have many methods on it and should be very fluid to use.
//  - very descriptive: heavily call out liabilities on the runtime. For example, don't just have a method like
//      "GetName" where Name is ambiguous. Heavily comment exactly what Name is, when it may fail, if it's 0-length,
//      if it's unique, etc. This serves two purposes:
//      a) it helps ensure the right invariants flow up to the public API level.
//      b) it helps ensure that the debugger is making the right assumptions about the runtime's behavior.
//
// #Marshaling:
//   This interface should be marshalable such that the caller (the Right Side) can exist in one
//   process, while the implementation of Dac could be on another machine.
//   - All types need to be marshalable.
//   - Use OUT and OPTIONAL as defined in windef.h to guide the marshaler. Here is how types are marshaled:
//       T  : value-type, copied on input.
//       T* : will be marshaled as non-null by-ref (copy on input, copy on return),
//       const T*: non-null, copy on input only.
//       OUT T*: non-null copy-on-return only.
//       OPTIONAL T*: by-ref, could be null.
//   - The marshaler has special knowledge of IStringHolder and DacDbiArrayList<T>.
//   - You can write custom marshalers for non-blittable structures defined in DacDbiStructures.h.
//   - There is custom handling for marshalling callbacks.
//
// Threading: The interface (and the underlying DataTarget) are free-threaded to leverage
//   concurrency.
//
// Allocation:
//   This interface can use IAllocator to allocate objects and hand them back. The allocated objects should be:
//   - closed, serializable object graphs.
//   - should have private fields and public accessors
//   - have dtors that free any allocated memory by calling DeleteDbiMemory.
//   Objects can be declared in a header and shared between both dbi and dac.
//   Consider using DacDbiArrayList<T> instead of custom allocations.
//
// Error handling:
//   Any call on the interface may fail. For example, the data-target may not have access to the necessary memory.
//   Methods should throw on error.
//
// #Enumeration
// General rules about Enumerations:
//    - Do not assume that enumerations exposed here are in any particular order.
//    - many enumerations also correspond to Load/Unload events. Since load/unload aren't atomic with publishing
//      in an enumeration, this is a Total Ordering of things:
//       a) object shows up in enumeration
//       b) load event.
//       c) ... steady state ...
//       d) object removed from DacDbi enumeration;
//          Any existing handles we get beyond this are explicitly associated with a Cordb* object, which can be
//          neutered on the unload event by Dbi.
//       e) unload event.
//          - Send after its reachability from other objects is broken. (Eg, for AppDomain unload,
//            this means no threads are left in that appdomain)
//          - Send before it's deleted (so VMPTR is still valid; not yet recycled).
//          - Send early enough that property access can at least gracefully fail. (eg,
//            Module::GetName should either return the name, or fail)
//
//          Cordb must neuter any Cordb objects that have any pre-existing handles to the object.
//          After this point, it is guaranteed that nobody can discover the VMPTR any more:
//          - doesn't show up in enumerations (so can't be discovered implicitly)
//          - object should not be discoverable by other objects in VM.
//          - any Cordb object that already had it would be neutered by Dbi.
//          - Therefore nothing should even be asking Dac for it.
//       f) object deleted.
//    Think of it like this: The event occurs to let you know that the enumeration has been updated.
//
//    A robust debugger should not rely on events for correctness. For example,
//    a corrupt debuggee may send:
//    1) multiple load events. (if target repeats due to an issue)
//    2) no load event and only an unload event. (if target fails in between
//       publish (a) and load (b), and then backout code sends the unload).
//    3) no unload event.
//       (eg, if target is rudely killed)
//    4) multiple unload events (if target repeats due to bug)
//
//    This satisfies the following rules:
//    - once you get the load event, you can find the object via enumeration
//    - once an item is discoverable, it must immediately show up in the enumeration.
//    - once you get the unload event, the object is dead and can't be rediscovered via enumeration.
//
//    This is an issue even for well-behaved targets. Imagine if a debugger attaches right after
//    an unload event is sent. We don't want the debugger to enumerate and re-discover the
//    unloaded object because now that the unload event is already sent, the debugger won't get
//    any further notification of when the object is deleted in the target.
//    Thus it's valuable for the debugger to have debug-only checks after unload events to assert
//    that the object is no longer discoverable.
//
//.............................................................................
// The purpose of this object is to provide EE functionality back to
// the debugger. This represents the entire set of EE functions used
// by the debugger.
//
// We will make this interface larger over time to grow the functionality
// between the EE and the Debugger.
//
//
//-----------------------------------------------------------------------------
class IDacDbiInterface
{
public:
    class IStringHolder;

    // The following tag tells the DD-marshalling tool to start scanning.
    // BEGIN_MARSHAL

    //-----------------------------------------------------------------------------
    // Functions to control the behavior of the DacDbi implementation itself.
    //-----------------------------------------------------------------------------

    //
    // Check whether the version of the DBI matches the version of the runtime.
    // This is only called when we are remote debugging. On Windows, we should have checked all the
    // versions before we call any API on the IDacDbiInterface. See
    // code:CordbProcess::CordbProcess#DBIVersionChecking for more information on version checks.
    //
    // Return Value:
    //     S_OK on success.
    //
    // Notes:
    //     THIS MUST BE THE FIRST API ON THE INTERFACE!
    //
    virtual HRESULT CheckDbiVersion(const DbiVersion * pVersion) = 0;

    //
    // Flush the DAC cache. This should be called when target memory changes.
    //
    // Return Value:
    //     S_OK on success.
    //
    // Notes:
    //     If this fails, the interface is in an undefined state.
    //     This must be called anytime target memory changes, else all other functions
    //     (besides Destroy) may yield out-of-date or semantically incorrect results.
    //
    virtual HRESULT FlushCache() = 0;
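    // Illustrative call-order sketch (not part of the interface). Per the notes
    // above, CheckDbiVersion must be the first call, and FlushCache must be called
    // whenever target memory may have changed (e.g. after the debuggee has run).
    // 'pDac' and 'dbiVersion' are hypothetical.
    //
    //   HRESULT hr = pDac->CheckDbiVersion(&dbiVersion);   // first call, always
    //   if (FAILED(hr))
    //       return hr;
    //   ...
    //   hr = pDac->FlushCache();   // after each stop, before further inspection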
    //
    // Control DAC's checking of the target's consistency. Specifically, if this is disabled then
    // ASSERTs in VM code are ignored. The default is disabled, since DAC should do its best to
    // return results even with a corrupt or unsynchronized target. See
    // code:ClrDataAccess::TargetConsistencyAssertsEnabled for more details.
    //
    // When testing with a non-corrupt and properly synchronized target, this should be enabled to
    // help catch bugs.
    //
    // Arguments:
    //     fEnableAsserts - whether ASSERTs should be raised when consistency checks fail (_DEBUG
    //                      builds only)
    //
    // Notes:
    //     In the future we may want to extend DAC target consistency checks to be retail checks
    //     (exceptions) as well. We'll also need a mechanism for disabling them (eg. when an advanced
    //     user wants to try to get a result anyway even though the target is inconsistent). In that
    //     case we'll want an additional argument here for enabling/disabling the throwing of
    //     consistency failure exceptions (this is independent from asserts - there are legitimate
    //     scenarios for all 4 combinations).
    //
    virtual void DacSetTargetConsistencyChecks(bool fEnableAsserts) = 0;

    //
    // Destroy the interface object. The client should call this when it's done
    // with the IDacDbiInterface to free up any resources.
    //
    // Return Value:
    //     None.
    //
    // Notes:
    //     The client should not call anything else on this interface after Destroy.
    //
    virtual void Destroy() = 0;

    //-----------------------------------------------------------------------------
    // General purpose target inspection functions
    //-----------------------------------------------------------------------------

    //
    // Query if the Left-side is started up.
    //
    // Return Value:
    //     BOOL whether the Left-side is initialized.
    //
    // Notes:
    //     If the Left-side is not yet started up, then data in the LS is not yet initialized enough
    //     for us to make meaningful queries, but the runtime will fire "Startup Exception" when it is.
    //
    //     If the left-side is started up, then data is ready. (Although data may be temporarily inconsistent,
    //     see DataSafe). We may still get a Startup Exception in these cases, but it can be ignored.
    //
    virtual BOOL IsLeftSideInitialized() = 0;

    //
    // Get an LS Appdomain via an AppDomain unique ID.
    // Fails if the AD is not found or if the ID is invalid.
    //
    // Arguments:
    //     appdomainId - "unique appdomain ID". Must be a valid Id.
    //
    // Return Value:
    //     VMPTR_AppDomain for the corresponding AppDomain ID. Else throws.
    //
    // Notes:
    //     This query is based off the lifespan of the AppDomain from the VM's perspective.
    //     The AppDomainId is most likely obtained from an AppDomain-Created debug event.
    //     An AppDomainId is unique for the lifetime of the VM.
    //     This is the inverse function of GetAppDomainId().
    //
    virtual VMPTR_AppDomain GetAppDomainFromId(ULONG appdomainId) = 0;

    //
    // Get the AppDomain ID for an AppDomain.
    //
    // Arguments:
    //     vmAppDomain - VM pointer to the AppDomain object of interest
    //
    // Return Value:
    //     AppDomain ID for appdomain. Else throws.
    //
    // Notes:
    //     An AppDomainId is unique for the lifetime of the VM. It is non-zero.
    //
    virtual ULONG GetAppDomainId(VMPTR_AppDomain vmAppDomain) = 0;

    //
    // Get the managed AppDomain object for an AppDomain.
    //
    // Arguments:
    //     vmAppDomain - VM pointer to the AppDomain object of interest
    //
    // Return Value:
    //     objecthandle for the managed app domain object, or the Null VMPTR if there is no
    //     object created yet
    //
    // Notes:
    //     The AppDomain managed object is lazily constructed on the AppDomain the first time
    //     it is requested. It may be NULL.
    //
    virtual VMPTR_OBJECTHANDLE GetAppDomainObject(VMPTR_AppDomain vmAppDomain) = 0;

    virtual void GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Assembly * vmAssembly) = 0;

    //
    // Determines whether the runtime security system has assigned full-trust to this assembly.
    //
    // Arguments:
    //     vmDomainAssembly - VM pointer to the assembly in question.
    //
    // Return Value:
    //     Returns trust status for the assembly.
    //     Throws on error.
    //
    // Notes:
    //     Of course trusted malicious code in the process could always cause this API to lie. However,
    //     an assembly loaded without full-trust should have no way of causing this API to return true.
    //
    virtual BOOL IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly) = 0;

    //
    // Get the full AD friendly name for the given EE AppDomain.
    //
    // Arguments:
    //     vmAppDomain - VM pointer to the AppDomain.
    //     pStrName - required out parameter where the name will be stored.
    //
    // Return Value:
    //     None. On success, sets the string via the holder. Throws on error.
    //     This either sets pStrName or Throws. It won't do both.
    //
    // Notes:
    //     AD names have an unbounded length. AppDomain friendly names can also change, and
    //     so callers should be prepared to listen for name-change events and requery.
    //     AD names are specified by the user.
    //
    virtual void GetAppDomainFullName(
        VMPTR_AppDomain vmAppDomain,
        IStringHolder * pStrName) = 0;

    //
    // #ModuleNames
    //
    // Modules / Assemblies have many different naming schemes:
    //
    // 1) Metadata Scope name: All modules have metadata, and each metadata scope has a name assigned
    //    by the creator of that scope (eg, the compiler). This usually is similar to the filename, but could
    //    be arbitrary.
    //    eg: "Foo"
    //
    // 2) FileRecord: the File record entry in the manifest module's metadata (table 0x26) for this module.
    //    eg: "Foo"
    //
    // 3) Managed module path: This is the path that the image was loaded from. Eg, "c:\foo.dll". For non-file
    //    based modules (like in-memory, dynamic), there is no file path. The specific path is determined by
    //    fusion / loader policy.
    //    eg: "c:\foo.dll"
    //
    // 4) GAC path: If the module is loaded from the GAC, this is the path on disk into the gac cache that
    //    the image was pulled from.
    //    eg: "
    //
    // 5) Ngen path: If the module was ngenned, this is the path on disk into the ngen cache that the image
    //    was pulled from.
    //    eg:
    //
    // 6) Fully Qualified Assembly Name: this is an abstract name, which the CLR (fusion / loader) will
    //    resolve (to a filename for file-based modules). Managed apps may need to deal in terms of FQN,
    //    but the debugging services generally avoid them.
    //    eg: "Foo, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089, processorArchitecture=MSIL".
    //

    //
    // Get the "simple name" of a module. This is a heuristic within the CLR to return a simple,
    // not-well-specified, but meaningful, name for a module.
    //
    // Arguments:
    //     vmModule - module to query
    //     pStrFilename - string holder to get simple name.
    //
    // Return Value:
    //     None, but pStrFilename will be initialized upon return.
    //     Throws if there was a problem reading the data with DAC or if there is an OOM exception,
    //     in which case no string was stored into pStrFilename.
    //
    // Notes:
    //     See code:#ModuleNames for an overview on module names.
    //
    //     This is really just using code:Module::GetSimpleName.
    //     This gives back a meaningful name, which is generally some combination of the metadata
    //     name or the FileRecord name. This is important because it's valid even when a module
    //     doesn't have a filename.
    //
    //     The simple name does not have any meaning. It is not a filename, does not necessarily have any
    //     relationship to the filename, and it's not necessarily the metadata name.
    //     Do not use the simple name for anything other than as a pretty string to give the end user.
    //
    virtual void GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0;

    //
    // Get the full path and file name to the assembly's manifest module.
    //
    // Arguments:
    //     vmAssembly - VM pointer to the Assembly.
    //     pStrFilename - required out parameter where the filename will be stored.
    //
    // Return Value:
    //     TRUE on success, in which case the filename was stored into pStrFilename
    //     FALSE if the assembly has no filename (eg. for in-memory assemblies), in which
    //     case an empty string was stored into pStrFilename.
    //     Throws if there was a problem reading the data with DAC, in which case
    //     no string was stored into pStrFilename.
    //
    // Notes:
    //     See code:#ModuleNames for an overview on module names.
    //
    //     Normally this is just the filename from which the dll containing the assembly was
    //     loaded. In the case of multi-module assemblies, this is the filename for the
    //     manifest module (the one containing the assembly manifest). For in-memory
    //     assemblies (eg. those loaded from a Byte[], and those created by Reflection.Emit
    //     which will not be saved to disk) there is no filename. In that case this API
    //     returns an empty string.
    //
    virtual BOOL GetAssemblyPath(VMPTR_Assembly vmAssembly, IStringHolder * pStrFilename) = 0;

    // get a type def resolved across modules
    // Arguments:
    //     input:  pTypeRefInfo - domain file and type ref from the referencing module
    //     output: pTargetRefInfo - domain file and type def from the referenced type (this may
    //             come from a module other than the referencing module)
    // Note: throws
    virtual void ResolveTypeReference(const TypeRefData * pTypeRefInfo,
                                      TypeRefData *       pTargetRefInfo) = 0;

    //
    // Get the full path and file name to the module (if any).
    //
    // Arguments:
    //     vmModule - VM pointer to the module.
    //     pStrFilename - required out parameter where the filename will be stored.
    //
    // Return Value:
    //     TRUE on success, in which case the filename was stored into pStrFilename
    //     FALSE if the module has no filename (eg. for in-memory assemblies), in which
    //     case an empty string was stored into pStrFilename.
    //     Throws an exception if there was a problem reading the data with DAC, in which case
    //     no string was stored into pStrFilename.
    //
    // Notes:
    //     See code:#ModuleNames for an overview on module names.
    //
    //     Normally this is just the filename from which the module was loaded.
    //     For in-memory modules (eg. those loaded from a Byte[], and those created by Reflection.Emit
    //     which will not be saved to disk) there is no filename. In that case this API
    //     returns an empty string. Consider GetModuleSimpleName in those cases.
    //
    //     We intentionally don't use the function name "GetModuleFileName" here because
    //     winbase #defines that token (along with many others) to have an A or W suffix.
    //
    virtual BOOL GetModulePath(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0;

    //
    // Get the full path and file name to the ngen image for the module (if any).
    //
    // Arguments:
    //     vmModule - VM pointer to the module.
    //     pStrFilename - required out parameter where the filename will be stored.
    //
    // Return Value:
    //     TRUE on success, in which case the filename was stored into pStrFilename
    //     FALSE if the module has no filename (eg. for in-memory assemblies), in which
    //     case an empty string was stored into pStrFilename.
    //     Throws an exception if there was a problem reading the data with DAC, in which case
    //     no string was stored into pStrFilename.
    //
    // Notes:
    //     See code:#ModuleNames for an overview on module names.
    //
    virtual BOOL GetModuleNGenPath(VMPTR_Module vmModule, IStringHolder * pStrFilename) = 0;

    // Get the metadata for the target module
    //
    // Arguments:
    //     vmModule - target module to get metadata for.
    //     pTargetBuffer - Out parameter to get target-buffer for metadata. Guaranteed to be non-empty on
    //         return. This will throw a CORDBG_E_MISSING_METADATA hr if the buffer is empty.
    //         This does not guarantee that the buffer is readable. For example, in a minidump, the buffer's
    //         memory may not be present.
    //
    // Notes:
    //     Each module's metadata exists as a raw buffer in the target. This finds that target buffer and
    //     returns it. The host can then use OpenScopeOnMemory to create an instance of the metadata in
    //     the host process space.
    //
    //     For dynamic modules, the CLR will eagerly serialize the metadata at "debuggable" points. This
    //     could be after each type is loaded, or after a bulk update.
    //     For non-dynamic modules (both in-memory and file-based), the metadata exists in the PEAssembly's image.
    //
    // Failure cases:
    //     This should succeed in normal, live-debugging scenarios. However, common failure paths here would be:
    //
    //     1. Data structures are intact, but unable to even find the TargetBuffer in the target. In this
    //        case metadata is truly missing. Likely means:
    //        - target is in the middle of generating metadata for a large bulk operation. (For example, attach
    //          to a TypeLibConverter using Ref.Emit to emit a module for a very large .tlb file.)
    //        - corrupted target,
    //        - or the target had some error (out-of-memory?) generating the metadata.
    //        This throws CORDBG_E_MISSING_METADATA.
    //
    //     2. Target buffer is found, but the memory it describes is not present. Likely means a minidump
    //        scenario with missing memory. Client should use alternative metadata location techniques (such as
    //        an ImagePath to locate the original image and then pulling metadata from that file).
    //
    virtual void GetMetadata(VMPTR_Module vmModule, OUT TargetBuffer * pTargetBuffer) = 0;

    // Definitions for possible symbol formats
    // This is equivalent to code:ESymbolFormat in the runtime
    typedef enum
    {
        kSymbolFormatNone,  // No symbols available
        kSymbolFormatPDB,   // PDB symbol format - use diasymreader.dll
    } SymbolFormat;

    //
    // Get the in-memory symbol (PDB/ILDB) buffer in the target if present.
    //
    // Arguments:
    //     vmModule - module to query for.
    //     pTargetBuffer - out parameter to get buffer in target of symbols. If no symbols, pTargetBuffer is empty on return.
    //     pSymbolFormat - out parameter to get the format of the symbols.
    //
    // Returns:
    //     1) If there are in-memory symbols for the given module, pTargetBuffer is set to the buffer describing
    //        the symbols and pSymbolFormat is set to indicate PDB or ILDB format. This buffer can then be read,
    //        converted into an IStream, and passed to ISymUnmanagedBinder::CreateReaderForStream.
    //     2) If the target is valid, but there are no symbols for the module, then pTargetBuffer->IsEmpty() == true
    //        and *pSymbolFormat == kSymbolFormatNone.
    //     3) Else, throws an exception.
    //
    // Notes:
    //     For file-based modules, PDBs are normally on disk and the debugger retrieves them via a symbol
    //     path without any help from ICorDebug.
    //     However, in some cases, the PDB is stored in-memory and so the debugger needs ICorDebug. Common
    //     cases include:
    //     - dynamic modules generated with reflection-emit.
    //     - in-memory modules loaded by Load(Byte[],Byte[]), which provide the PDB as a byte[].
    //     - hosted modules where the host (such as SQL) stores the PDB.
    //
    //     In all cases, this can commonly fail. Executable code does not need to have a PDB.
    virtual void GetSymbolsBuffer(VMPTR_Module vmModule, OUT TargetBuffer * pTargetBuffer, OUT SymbolFormat * pSymbolFormat) = 0;
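    // Illustrative sketch of consuming GetMetadata (not part of the interface).
    // 'pDac', 'pDisp', and the target-read step are hypothetical; OpenScopeOnMemory
    // is the standard metadata API referred to in the notes above.
    //
    //   TargetBuffer mdBuffer;
    //   pDac->GetMetadata(vmModule, &mdBuffer);   // throws on failure
    //   // copy the buffer's bytes out of the target (e.g. with the debugger's
    //   // data-target ReadVirtual), then open a host-side importer on the copy:
    //   IMetaDataImport * pImport = NULL;
    //   HRESULT hr = pDisp->OpenScopeOnMemory(pLocalCopy, cbLocalCopy, ofReadOnly,
    //                                         IID_IMetaDataImport, (IUnknown **)&pImport);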
    //
    // Get properties for a module
    //
    // Arguments:
    //     vmModule - vm handle to a module
    //     pData - required out parameter which will be filled out with module properties
    //
    // Notes:
    //     See definition of ModuleInfo for more details about what properties
    //     this gives back.
    virtual void GetModuleData(VMPTR_Module vmModule, OUT ModuleInfo * pData) = 0;

    //
    // Get properties for a DomainAssembly
    //
    // Arguments:
    //     vmDomainAssembly - vm handle to a DomainAssembly
    //     pData - required out parameter which will be filled out with module properties
    //
    // Notes:
    //     See definition of DomainAssemblyInfo for more details about what properties
    //     this gives back.
    virtual void GetDomainAssemblyData(VMPTR_DomainAssembly vmDomainAssembly, OUT DomainAssemblyInfo * pData) = 0;

    virtual void GetModuleForDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Module * pModule) = 0;

    //.........................................................................
    // These methods were the methods that DBI was calling from IXClrData in V2.
    // We imported them over to this V3 interface so that we can sever all ties between DBI and the
    // old IXClrData.
    //
    // The exact semantics of these are whatever their V2 IXClrData counterpart did.
    // We may eventually migrate these to their real V3 replacements.
    //.........................................................................

    // "types" of addresses. This is taken exactly from the definition, but renamed to match
    // CLR coding conventions.
    typedef enum
    {
        kAddressUnrecognized,
        kAddressManagedMethod,
        kAddressRuntimeManagedCode,
        kAddressRuntimeUnmanagedCode,
        kAddressGcData,
        kAddressRuntimeManagedStub,
        kAddressRuntimeUnmanagedStub,
    } AddressType;

    //
    // Get the "type" of an address.
    //
    // Arguments:
    //     address - address to query type.
    //
    // Return Value:
    //     Type of address. Throws on error.
    //
    // Notes:
    //     This is taken exactly from the IXClrData definition.
    //     This is provided for V3 compatibility to support Interop-debugging.
    //     This should eventually be deprecated.
    //
    virtual AddressType GetAddressType(CORDB_ADDRESS address) = 0;

    //
    // Query if address is a CLR stub.
    //
    // Arguments:
    //     address - Target address to query for.
    //
    // Return Value:
    //     true if the address is a CLR stub.
    //
    // Notes:
    //     This is used to implement ICorDebugProcess::IsTransitionStub
    //     This yields true if the address is claimed by a CLR stub manager, or if the IP is in mscorwks.
    //     Conceptually, this should eventually be merged with GetAddressType().
    //
    virtual BOOL IsTransitionStub(CORDB_ADDRESS address) = 0;

    //.........................................................................
    // Get the values of the JIT Optimization and EnC flags.
    //
    // Arguments:
    //     vmDomainAssembly - (input) VM DomainAssembly (module) for which we are retrieving flags
    //     pfAllowJITOpts - (mandatory output) true iff this is not compiled for debugging,
    //                      i.e., JIT optimizations are allowed
    //     pfEnableEnc - (mandatory output) true iff this module has EnC enabled
    //
    // Return Value:
    //     Returns on success. Throws on failure.
    //
    // Notes:
    //     This is used to implement both ICorDebugModule2::GetJitCompilerFlags and
    //     ICorDebugCode2::GetCompilerFlags.
    //.........................................................................
    virtual void GetCompilerFlags(
        VMPTR_DomainAssembly vmDomainAssembly,
        OUT BOOL * pfAllowJITOpts,
        OUT BOOL * pfEnableEnC) = 0;
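    // Illustrative sketch (not part of the interface): forcing debuggable code for
    // a module at load time. 'pDac' is hypothetical; the return codes are
    // described in the SetCompilerFlags notes just below.
    //
    //   // At module load, before any methods are jitted:
    //   HRESULT hr = pDac->SetCompilerFlags(vmDomainAssembly,
    //                                       FALSE /* fAllowJitOpts */,
    //                                       TRUE  /* fEnableEnC */);
    //   if (hr == CORDBG_S_NOT_ALL_BITS_SET)
    //   {
    //       BOOL fAllowJitOpts, fEnableEnC;
    //       pDac->GetCompilerFlags(vmDomainAssembly, &fAllowJitOpts, &fEnableEnC);
    //       // inspect which of the requested bits actually took effect
    //   }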
    //.........................................................................
    // Set the values of the JIT optimization and EnC flags.
    //
    // Arguments:
    //     vmDomainAssembly - (input) VM DomainAssembly (module) for which we are setting flags
    //     pfAllowJITOpts - (input) true iff this should not be compiled for debugging,
    //                      i.e., JIT optimizations are allowed
    //     pfEnableEnc - (input) true iff this module should have EnC enabled. If this is
    //                   false, no change is made to the EnC flags. In other words, once EnC is enabled,
    //                   there is no way to disable it.
    //
    // Return Value:
    //     S_OK on success and all bits were set.
    //     CORDBG_S_NOT_ALL_BITS_SET - if not all bits are set. Must use GetCompilerFlags to
    //     determine which bits were set.
    //     CORDBG_E_CANT_CHANGE_JIT_SETTING_FOR_ZAP_MODULE - if module is ngenned.
    //     Throws on other errors.
    //
    // Notes:
    //     Caller can only use this at module-load before any methods are jitted.
    //     This may be called multiple times.
    //     This is used to implement both ICorDebugModule2::SetJitCompilerFlags and
    //     ICorDebugModule::EnableJITDebugging.
    //.........................................................................
    virtual HRESULT SetCompilerFlags(VMPTR_DomainAssembly vmDomainAssembly,
                                     BOOL fAllowJitOpts,
                                     BOOL fEnableEnC) = 0;

    //
    // Enumerate all AppDomains in the process.
    //
    // Arguments:
    //     fpCallback - callback to invoke on each appdomain
    //     pUserData - user data to supply for each callback.
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    // Notes:
    //     Enumerates all appdomains in the process, including the Default-domain.
    //     Appdomains must show up in this list before the AD Load event is sent, and before
    //     that appdomain is discoverable from the debugger.
    //     See enumeration rules for details.
    //
    typedef void (*FP_APPDOMAIN_ENUMERATION_CALLBACK)(VMPTR_AppDomain vmAppDomain, CALLBACK_DATA pUserData);
    virtual void EnumerateAppDomains(FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback,
                                     CALLBACK_DATA pUserData) = 0;

    //
    // Enumerate all Assemblies in an appdomain. Enumeration is in load order.
    //
    // Arguments:
    //     vmAppDomain - domain in which to enumerate
    //     fpCallback - callback to invoke on each assembly
    //     pUserData - user data to supply for each callback.
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    // Notes:
    //     Enumerates all executable assemblies (both shared and unshared) within an appdomain.
    //     This does not include inspection-only assemblies because those are just data and
    //     not executable (eg, they'll never show up on the stack and you can't set a breakpoint in them).
    //     This enumeration needs to be consistent with load/unload events.
    //     See enumeration rules for details.
    //
    //     The order of the enumeration is the order the assemblies were loaded.
    //     Ultimately, the debugger needs to be able to tell the user the load
    //     order of assemblies (it can do this with native dlls). Since
    //     managed assemblies don't 1:1 correspond to native dlls, debuggers
    //     need this information from the runtime.
    //
    typedef void (*FP_ASSEMBLY_ENUMERATION_CALLBACK)(VMPTR_DomainAssembly vmDomainAssembly, CALLBACK_DATA pUserData);
    virtual void EnumerateAssembliesInAppDomain(VMPTR_AppDomain vmAppDomain,
                                                FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback,
                                                CALLBACK_DATA pUserData) = 0;

    //
    // Callback function for EnumerateModulesInAssembly
    //
    // This can throw on error.
    //
    // Arguments:
    //     vmModule - new module from the enumeration
    //     pUserData - user data passed to EnumerateModulesInAssembly
    typedef void (*FP_MODULE_ENUMERATION_CALLBACK)(VMPTR_DomainAssembly vmModule, CALLBACK_DATA pUserData);
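    // Illustrative nested-enumeration sketch (not part of the interface): walking
    // every assembly in every appdomain. 'EnumState', 'OnAssembly', and
    // 'OnAppDomain' are hypothetical; the same shape applies to
    // EnumerateModulesInAssembly below.
    //
    //   struct EnumState { IDacDbiInterface * pDac; /* ...accumulators... */ };
    //
    //   static void OnAssembly(VMPTR_DomainAssembly vmDomainAssembly, CALLBACK_DATA pUserData)
    //   {
    //       // record vmDomainAssembly; callbacks arrive in load order
    //   }
    //
    //   static void OnAppDomain(VMPTR_AppDomain vmAppDomain, CALLBACK_DATA pUserData)
    //   {
    //       EnumState * pState = (EnumState *) pUserData;
    //       pState->pDac->EnumerateAssembliesInAppDomain(vmAppDomain, OnAssembly, pUserData);
    //   }
    //
    //   EnumState state = { pDac };
    //   pDac->EnumerateAppDomains(OnAppDomain, (CALLBACK_DATA) &state);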
    //
    // Enumerates all the code Modules in an assembly.
    //
    // Arguments:
    //     vmAssembly - assembly to enumerate within
    //     fpCallback - callback function to invoke on each module
    //     pUserData - arbitrary data passed to the callback
    //
    // Notes:
    //     This only enumerates "code" modules (ie, modules that have executable code in them). That
    //     includes normal file-based, ngenned, in-memory, and even dynamic modules.
    //     That excludes:
    //     - Resource modules (which have no code or metadata)
    //     - Inspection-only modules. These are viewed as pure data from the debugger's perspective.
    //
    virtual void EnumerateModulesInAssembly(
        VMPTR_DomainAssembly vmAssembly,
        FP_MODULE_ENUMERATION_CALLBACK fpCallback,
        CALLBACK_DATA pUserData) = 0;

    //
    // When stopped at an event, request a synchronization.
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    // Notes:
    //     Call this when an event is dispatched (eg, LoadModule) to request that the runtime
    //     synchronize. This does a cooperative sync with the LS. This is not an async break
    //     and can not be called at arbitrary points.
    //     This primitive lets the LS always take the V3 codepath and defer decision making to the RS.
    //     The V2 behavior is to call this after every event (since that's what V2 did).
    //     The V3 behavior is to never call this.
    //
    //     If this is called, the LS will sync and we will get a SyncComplete.
    //
    //     This is also like a precursor to "AsyncBreakAllOtherThreads"
    //
    virtual void RequestSyncAtEvent() = 0;

    // Sets a flag inside LS.Debugger that indicates that
    // 1. all "first chance exception" events should not be sent to the debugger
    // 2. "exception handler found" events for exceptions never crossing JMC frames should not be sent to the debugger
    //
    // Arguments:
    //     sendExceptionsOutsideOfJMC - new value for the flag Debugger::m_sendExceptionsOutsideOfJMC.
    //
    // Return Value:
    //     Returns error code, never throws.
    //
    // Note: This call is used by ICorDebugProcess8.EnableExceptionCallbacksOutsideOfMyCode.
    virtual HRESULT SetSendExceptionsOutsideOfJMC(BOOL sendExceptionsOutsideOfJMC) = 0;

    //
    // Notify the debuggee that a debugger attach is pending.
    //
    // Arguments:
    //     None
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    // Notes:
    //     Attaching means that CORDebuggerPendingAttach() will now return true.
    //     This doesn't do anything else (eg, no fake events).
    //
    //     @dbgtodo - still an open Feature-Crew decision how this is exposed publicly.
    virtual void MarkDebuggerAttachPending() = 0;

    //
    // Notify the debuggee that a debugger is attached / detached.
    //
    // Arguments:
    //     fAttached - true if we're attaching, false if we're detaching.
    //
    // Return Value:
    //     Returns on success. Throws on error.
    //
    // Notes:
    //     Attaching means that CorDebuggerAttached() will now return true.
    //     This doesn't do anything else (eg, no fake events).
    //     This lets the V3 codepaths invade the LS to subscribe to events.
    //
    //     @dbgtodo - still an open Feature-Crew decision how this is exposed publicly.
    virtual void MarkDebuggerAttached(BOOL fAttached) = 0;

    //
    // Hijack a thread. This will effectively do a native func-eval of the thread to set the IP
    // to a hijack stub and push the parameters.
    //
    // Arguments:
    //     dwThreadId - OS thread to hijack. This must be consistent with pRecord and pOriginalContext
    //     pRecord - optional pointer to Exception record. Required if this is hijacked at an exception.
    //               NULL if this is hijacked at a managed IP.
    //     pOriginalContext - optional pointer to buffer to receive the context that the thread is hijacked from.
    //               The caller can use this to either restore the hijack or walk the hijack.
    //     cbSizeContext - size in bytes of buffer pointed to by pContext
    //     reason - reason code for the hijack. The hijack stub can then delegate to the proper hijack.
    //     pUserData - arbitrary data passed through to hijack. This is reason-dependent.
//
// Hijack a thread. This will effectively do a native func-eval of the thread to set the IP
// to a hijack stub and push the parameters.
//
// Arguments:
// dwThreadId - OS thread to hijack. This must be consistent with pRecord and pOriginalContext
// pRecord - optional pointer to Exception record. Required if this is hijacked at an exception.
// NULL if this is hijacked at a managed IP.
// pOriginalContext - optional pointer to buffer to receive the context that the thread is hijacked from.
// The caller can use this to either restore the hijack or walk the hijack.
// cbSizeContext - size in bytes of buffer pointed to by pOriginalContext
// reason - reason code for the hijack. The hijack stub can then delegate to the proper hijack.
// pUserData - arbitrary data passed through to hijack. This is reason-dependent.
// pRemoteContextAddr - If non-NULL this receives the remote address where the CONTEXT was written
// in the debuggee.
//
// Assumptions:
// Caller must guarantee this is safe.
// This is intended to be used at a thread that either just had an exception or is at a managed IP.
// If this is hijacked at an exception, client must cancel the exception (gh / DBG_CONTINUE)
// so that the OS exception processing doesn't interfere with the hijack.
//
// Notes:
// Hijack is hard, so we want 1 hijack stub that handles all our hijacking needs.
// This lets us share:
// - assembly stubs (which are very platform specific)
// - hijacking / restoration mechanics,
// - making the hijack walkable via the stackwalker.
//
// Hijacking can be used to implement: func-eval, FE abort, Synchronizing,
// dispatching Unhandled Exception notifications.
//
// Nesting: Since Hijacking passes the key state off to the hijacked thread, (such as original
// context to be used with restoring the hijack), the raw hijacking nests just like function
// calls. However, the client may need to keep additional state to handle nesting. For example,
// nested hijacks will require the client to track multiple CONTEXT*.
//
// If the thread is in jitted code, then the hijack needs to cooperate with the in-process
// stackwalker that the GC uses. It must be in cooperative mode, and push a Frame on the
// frame chain to protect the managed frames it hijacked from before it goes to preemptive mode.
virtual void Hijack(
    VMPTR_Thread vmThread,
    ULONG32 dwThreadId,
    const EXCEPTION_RECORD * pRecord,
    T_CONTEXT * pOriginalContext,
    ULONG32 cbSizeContext,
    EHijackReason::EHijackReason reason,
    void * pUserData,
    CORDB_ADDRESS * pRemoteContextAddr) = 0;

//
// Callback function for connection enumeration.
//
// Arguments:
// id - the connection ID.
// pName - the name of the connection.
// pUserData - user data supplied to EnumerateConnections
typedef void (*FP_CONNECTION_CALLBACK)(DWORD id, LPCWSTR pName, CALLBACK_DATA pUserData);

//
// Enumerate all the Connections in the process.
//
// Arguments:
// fpCallback - callback to invoke for each connection
// pUserData - random user data to pass to callback.
//
// Notes:
// This enumerates all the connections. The host notifies the debugger of Connections
// via the ICLRDebugManager interface.
// ICorDebug has no interest in connections. It's merely the transport between the host and the debugger.
// Ideally, that transport would be more general.
//
// V2 Attach would provide faked up CreateConnection, ChangeConnection events on attach.
// This enumeration ability allows V3 to emulate that behavior.
//
virtual void EnumerateConnections(FP_CONNECTION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0;

//
// Enumerate all threads in the target.
//
// Arguments:
// fpCallback - callback function to invoke on each thread.
// pUserData - arbitrary user data supplied to each callback.
//
// Notes:
// This enumerates the ThreadStore in the target, which is all the Thread* objects.
// This includes threads that have entered the runtime. This may include threads
// even before that thread has executed IL and after that thread no longer has managed
// code on its stack.

// Callback invoked for each thread.
typedef void (*FP_THREAD_ENUMERATION_CALLBACK)(VMPTR_Thread vmThread, CALLBACK_DATA pUserData);

virtual void EnumerateThreads(FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0;

// Check if the thread is dead
//
// Arguments:
// vmThread - valid thread to check if it's dead.
//
// Returns: true if the thread is "dead", which means it can never call managed code again.
//
// Notes:
// #IsThreadMarkedDead
// Thread shutdown states are:
// 1) Thread is running managed code normally. Thread eventually exits all managed code and
// gets to a point where it will never call managed code again.
// 2) Thread is marked as dead.
// - For threads created outside of the runtime (such as a native thread that wanders into
// managed code), this mark can happen in DllMain(ThreadDetach)
// - For threads created by the runtime (eg, System.Threading.Thread.Start), this may be done
// at the top of the thread's stack after it calls the user's Thread-Proc.
// 3) MAYBE Native thread exits at this point (or it may not). This would be the common case
// for threads created outside the runtime.
// 4) Thread exit event is sent.
// - For threads created by the runtime, this may be sent at the top of the thread's
// stack (or even when we know that the thread will never execute managed code again)
// - For threads created outside the runtime, this is more difficult. A thread can
// call into managed code and then return, and then call back into managed code at a
// later time (The finalizer does this!). So it's not clear when the native thread
// actually exits and will never call managed code again. The only hook we have for
// this is DllMain(Thread-Detach). We can mark bits in DllMain, but we can't send
// debugger notifications (too dangerous from such a restricted context).
// So we may mark the thread as dead, but then sweep later (perhaps on the finalizer
// thread), and thus send the Exit events later.
// 5) Native thread may exit at this point. This is the common case for threads created by
// the runtime.
//
// The underlying native thread may have exited at either #3 or #5. Because of this
// flexibility, we don't want to rely on native thread exit events.
// This function checks if a Thread is past state #2 (marked as dead). The key invariant
// is that once a thread is marked as dead:
// - it can never call managed code again.
// - it should not be discoverable by DacDbi enumerations.
//
// DBI should prefer relying on IsThreadMarkedDead rather than event notifications (either
// managed or native) because tracking events requires that DBI maintain state, which means
// that attach + dump cases may break. For example, we want a full dump at the ExitThread
// event to have the same view as a live process at the ExitThread event.
//
// We avoid relying on the native thread exit notifications because:
// - that's a specific feature of the Win32 debugging API that may not be available on other platforms.
// - the only native events the pipeline gets are Exceptions.
//
// Whether a thread is dead can be inferred from the ICorDebug API. However, we have this
// on DacDbi to ensure that this definition is consistent with the other DacDbi methods,
// especially the enumeration and discovery rules.
virtual bool IsThreadMarkedDead(VMPTR_Thread vmThread) = 0;

//
// Return the handle of the specified thread.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the handle of the specified thread
//
// @dbgtodo- this should go away in V3. This is useless on a dump.
virtual HANDLE GetThreadHandle(VMPTR_Thread vmThread) = 0;

//
// Return the object handle for the managed Thread object corresponding to the specified thread.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// This function returns the object handle for the managed Thread object corresponding to the
// specified thread. The return value may be NULL if a managed Thread object has not been created
// for the specified thread yet.
//
virtual VMPTR_OBJECTHANDLE GetThreadObject(VMPTR_Thread vmThread) = 0;
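// Example (illustrative sketch, not part of this interface): filtering dead
// threads during enumeration, per the discovery rules above. 'LiveThreadCounter'
// and 'CountLiveThreads' are made-up names; the callback reaches the interface
// through the user-data cookie rather than a global.
//
// struct LiveThreadCounter { IDacDbiInterface * pDDI; ULONG32 count; };
//
// static void LiveThreadCallback(VMPTR_Thread vmThread, CALLBACK_DATA pUserData)
// {
//     LiveThreadCounter * pCounter = reinterpret_cast<LiveThreadCounter *>(pUserData);
//     if (!pCounter->pDDI->IsThreadMarkedDead(vmThread))
//     {
//         pCounter->count++; // thread can still run managed code
//     }
// }
//
// ULONG32 CountLiveThreads(IDacDbiInterface * pDDI)
// {
//     LiveThreadCounter counter = { pDDI, 0 };
//     pDDI->EnumerateThreads(LiveThreadCallback, (CALLBACK_DATA) &counter);
//     return counter.count;
// }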
//
// Get the allocation info corresponding to the specified thread.
//
// Arguments:
// vmThread - the specified thread
// threadAllocInfo - the allocated bytes from SOH and UOH so far on this thread
//
virtual void GetThreadAllocInfo(VMPTR_Thread vmThread, DacThreadAllocInfo* threadAllocInfo) = 0;

//
// Set and reset the TSNC_DebuggerUserSuspend bit on the state of the specified thread
// according to the CorDebugThreadState.
//
// Arguments:
// vmThread - the specified thread
// debugState - the desired CorDebugThreadState
//
virtual void SetDebugState(VMPTR_Thread vmThread, CorDebugThreadState debugState) = 0;

//
// Returns TRUE if this thread has an unhandled exception
//
// Arguments:
// vmThread - the thread to query
//
// Return Value:
// TRUE iff this thread has an unhandled exception
//
virtual BOOL HasUnhandledException(VMPTR_Thread vmThread) = 0;

//
// Return the user state of the specified thread. Most of the state is derived from
// the ThreadState of the specified thread, e.g. TS_Background, TS_Unstarted, etc.
// The exception is USER_UNSAFE_POINT, which we need to do a one-frame stackwalk to figure out.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the user state of the specified thread
//
virtual CorDebugUserState GetUserState(VMPTR_Thread vmThread) = 0;

//
// Returns most of the user state of the specified thread,
// i.e. flags which can be derived from the ThreadState:
// USER_STOP_REQUESTED, USER_SUSPEND_REQUESTED, USER_BACKGROUND, USER_UNSTARTED
// USER_STOPPED, USER_WAIT_SLEEP_JOIN, USER_SUSPENDED, USER_THREADPOOL
//
// Only USER_UNSAFE_POINT is always set to 0, since it takes an additional stackwalk.
// If you need USER_UNSAFE_POINT, use GetUserState(VMPTR_Thread);
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the user state of the specified thread
//
virtual CorDebugUserState GetPartialUserState(VMPTR_Thread vmThread) = 0;

//
// Return the connection ID of the specified thread.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the connection ID of the specified thread
//
virtual CONNID GetConnectionID(VMPTR_Thread vmThread) = 0;

//
// Return the task ID of the specified thread.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the task ID of the specified thread
//
virtual TASKID GetTaskID(VMPTR_Thread vmThread) = 0;

//
// Return the OS thread ID of the specified thread
//
// Arguments:
// vmThread - the specified thread; cannot be NULL
//
// Return Value:
// the OS thread ID of the specified thread. Returns 0 if not scheduled.
//
virtual DWORD TryGetVolatileOSThreadID(VMPTR_Thread vmThread) = 0;

//
// Return the unique thread ID of the specified thread. The value used for the thread ID changes
// depending on whether the runtime is being hosted. In non-hosted scenarios, a managed thread will
// always be associated with the same native thread, and so we can use the OS thread ID as the thread ID
// for the managed thread. In hosted scenarios, however, a managed thread may run on multiple native
// threads. It may not even have a backing native thread if it's switched out. Therefore, we can't use
// the OS thread ID as the thread ID. Instead, we use the internal managed thread ID.
//
// Arguments:
// vmThread - the specified thread; cannot be NULL
//
// Return Value:
// Returns a stable and unique thread ID for the lifetime of the specified managed thread.
//
virtual DWORD GetUniqueThreadID(VMPTR_Thread vmThread) = 0;

//
// Return the object handle to the managed Exception object of the current exception
// on the specified thread. The return value could be NULL if there is no current exception.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// This function returns the object handle to the managed Exception object of the current exception.
// The return value may be NULL if there is no exception being processed, or if the specified thread
// is an unmanaged thread which has entered and exited the runtime.
//
virtual VMPTR_OBJECTHANDLE GetCurrentException(VMPTR_Thread vmThread) = 0;

//
// Return the object handle to the managed object for a given CCW pointer.
//
// Arguments:
// ccwPtr - the specified ccw pointer
//
// Return Value:
// This function returns the object handle to the managed object for a given CCW pointer.
//
virtual VMPTR_OBJECTHANDLE GetObjectForCCW(CORDB_ADDRESS ccwPtr) = 0;

//
// Return the object handle to the managed CustomNotification object of the current notification
// on the specified thread. The return value could be NULL if there is no current notification.
//
// Arguments:
// vmThread - the specified thread on which the notification occurred
//
// Return Value:
// This function returns the object handle to the managed CustomNotification object of the current notification.
// The return value may be NULL if there is no current notification.
//
virtual VMPTR_OBJECTHANDLE GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread) = 0;

//
// Return the current appdomain the specified thread is in.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the current appdomain of the specified thread
//
// Notes:
// This function throws if the current appdomain is NULL for whatever reason.
//
virtual VMPTR_AppDomain GetCurrentAppDomain(VMPTR_Thread vmThread) = 0;

//
// Resolve an assembly
//
// Arguments:
// vmScope - module containing metadata that the token is scoped to.
// tkAssemblyRef - assembly ref token to look up.
//
// Returns:
// Assembly that the loader/fusion has bound to the given assembly ref.
// Returns NULL if the assembly has not yet been loaded (a common case).
// Throws on error.
//
// Notes:
// A single module has metadata that specifies references via tokens. The
// loader/fusion goes through tremendous and random policy hoops to determine
// which specific file actually gets bound to the reference. This policy includes
// things like config files, registry settings, and many other knobs.
//
// The debugger can't duplicate this policy with 100% accuracy, and
// so we need DAC to look up the assembly that was actually loaded.
virtual VMPTR_DomainAssembly ResolveAssembly(VMPTR_DomainAssembly vmScope, mdToken tkAssemblyRef) = 0;

//-----------------------------------------------------------------------------
// Interface for initializing the native/IL sequence points and native var info
// for a function.
// Arguments:
// input:
// vmMethodDesc MethodDesc of the function
// startAddress starting address of the function--this serves to
// differentiate various EnC versions of the function
// fCodeAvailable indicates whether native code is available for the function
// output:
// pNativeVarData space for the native code offset information for locals
// pSequencePoints space for the IL/native sequence points
// Return value:
// none, but may throw an exception
// Assumptions:
// vmMethodDesc, pNativeVarData and pSequencePoints are non-NULL
// Notes:
//-----------------------------------------------------------------------------
virtual void GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS startAddress, BOOL fCodeAvailable, OUT NativeVarData * pNativeVarData, OUT SequencePoints * pSequencePoints) = 0;

//
// Return the filter CONTEXT on the LS. Once we move entirely over to the new managed pipeline
// built on top of the Win32 debugging API, this won't be necessary.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// the filter CONTEXT of the specified thread
//
// Notes:
// This function should go away when everything is moved OOP and
// we don't have a filter CONTEXT on the LS anymore.
//
virtual VMPTR_CONTEXT GetManagedStoppedContext(VMPTR_Thread vmThread) = 0;

typedef enum
{
    kInvalid,
    kManagedStackFrame,
    kExplicitFrame,
    kNativeStackFrame,
    kNativeRuntimeUnwindableStackFrame,
    kAtEndOfStack,
} FrameType;

// The stackwalker functions allocate persistent state within DDImpl. Clients can hold onto
// this via an opaque StackWalkHandle.
typedef void* * StackWalkHandle;

//
// Create a stackwalker on the specified thread and return a handle to it.
// Initially, the stackwalker is at the filter CONTEXT if there is one.
// Otherwise it is at the leaf CONTEXT. It DOES NOT fast forward to the first frame of interest.
//
// Arguments:
// vmThread - the specified thread
// pInternalContextBuffer - a CONTEXT buffer for the stackwalker to work with
// ppSFIHandle - out parameter; return a handle to the stackwalker
//
// Notes:
// Call DeleteStackWalk() to delete the stackwalk buffer.
// This is a special case that violates the 'no state' tenet.
//
virtual void CreateStackWalk(VMPTR_Thread vmThread, DT_CONTEXT * pInternalContextBuffer, OUT StackWalkHandle * ppSFIHandle) = 0;

// Delete the stackwalk object created from CreateStackWalk.
virtual void DeleteStackWalk(StackWalkHandle ppSFIHandle) = 0;

//
// Get the CONTEXT of the current frame where the stackwalker is stopped at.
//
// Arguments:
// pSFIHandle - the handle to the stackwalker
// pContext - OUT: the CONTEXT to be filled out. The context control flags are ignored.
//
virtual void GetStackWalkCurrentContext(StackWalkHandle pSFIHandle, DT_CONTEXT * pContext) = 0;

//
// Set the stackwalker to the given CONTEXT. The CorDebugSetContextFlag indicates whether
// the CONTEXT is "active", meaning that the IP points at the current instruction,
// not the return address of some function call.
//
// Arguments:
// vmThread - the current thread
// pSFIHandle - the handle to the stackwalker
// flag - flag to indicate whether the specified CONTEXT is "active"
// pContext - the specified CONTEXT. This may make correctional adjustments to the context's IP.
//
virtual void SetStackWalkCurrentContext(VMPTR_Thread vmThread, StackWalkHandle pSFIHandle, CorDebugSetContextFlag flag, DT_CONTEXT * pContext) = 0;
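// Example (illustrative sketch, not part of this interface): the
// create/unwind/delete pattern implied by the stackwalker methods above.
// 'WalkAllFrames' is a made-up name and error handling is elided.
//
// void WalkAllFrames(IDacDbiInterface * pDDI, VMPTR_Thread vmThread)
// {
//     DT_CONTEXT context;
//     IDacDbiInterface::StackWalkHandle hWalk = NULL;
//     pDDI->CreateStackWalk(vmThread, &context, &hWalk);
//
//     do
//     {
//         // Passing NULL because this sketch only wants the frame type.
//         IDacDbiInterface::FrameType type = pDDI->GetStackWalkCurrentFrameInfo(hWalk, NULL);
//         if (type == IDacDbiInterface::kAtEndOfStack)
//             break;
//         // ... inspect the current frame here ...
//     } while (pDDI->UnwindStackWalkFrame(hWalk)); // FALSE when no frames remain
//
//     pDDI->DeleteStackWalk(hWalk); // must balance CreateStackWalk
// }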
//
// Unwind the stackwalker to the next frame. The next frame could be any actual stack frame,
// explicit frame, native marker frame, etc. Call GetStackWalkCurrentFrameInfo() to find out
// more about the frame.
//
// Arguments:
// pSFIHandle - the handle to the stackwalker
//
// Return Value:
// Return TRUE if we successfully unwind to the next frame.
// Return FALSE if there are no more frames to walk.
// Throws on error.
//
virtual BOOL UnwindStackWalkFrame(StackWalkHandle pSFIHandle) = 0;

//
// Check whether the specified CONTEXT is valid. The only check we perform right now is whether the
// SP in the specified CONTEXT is in the stack range of the thread.
//
// Arguments:
// vmThread - the specified thread
// pContext - the CONTEXT to be checked
//
// Return Value:
// Return S_OK if the CONTEXT passes our checks.
// Returns CORDBG_E_NON_MATCHING_CONTEXT if the SP in the specified CONTEXT doesn't fall in the stack
// range of the thread.
// Throws on error.
//
virtual HRESULT CheckContext(VMPTR_Thread vmThread, const DT_CONTEXT * pContext) = 0;

//
// Fill in the DebuggerIPCE_STRData structure with information about the current frame
// where the stackwalker is stopped at.
//
// Arguments:
// pSFIHandle - the handle to the stackwalker
// pFrameData - the DebuggerIPCE_STRData to be filled out;
// it can be NULL if you just want to know the frame type
//
// Return Value:
// Return the type of the current frame
//
virtual FrameType GetStackWalkCurrentFrameInfo(StackWalkHandle pSFIHandle, OPTIONAL DebuggerIPCE_STRData * pFrameData) = 0;

//
// Return the number of internal frames on the specified thread.
//
// Arguments:
// vmThread - the thread whose internal frames are being retrieved
//
// Return Value:
// Return the number of internal frames.
//
// Notes:
// Explicit frames are "marker objects" the runtime pushes on the stack to mark special places, e.g.
// appdomain transition, managed-to-unmanaged transition, etc. Internal frames are only a subset of
// explicit frames. Explicit frames which are not interesting to the debugger are not exposed (e.g.
// GCFrame). Internal frames are interesting to the debugger if they have a CorDebugInternalFrameType
// other than STUBFRAME_NONE.
//
// The user should call this function before code:IDacDbiInterface::EnumerateInternalFrames to figure
// out how many interesting internal frames there are.
//
virtual ULONG32 GetCountOfInternalFrames(VMPTR_Thread vmThread) = 0;

//
// Enumerate the internal frames on the specified thread and invoke the provided callback on each of
// them. Information about the internal frame is stored in the DebuggerIPCE_STRData.
//
// Arguments:
// vmThread - the thread to be walked
// fpCallback - callback function invoked on each internal frame
// pUserData - user-specified custom data
//
// Notes:
// The user can call code:IDacDbiInterface::GetCountOfInternalFrames to figure out how many internal
// frames are on the thread before calling this function. Also, refer to the comment of that function
// to find out more about internal frames.
//
typedef void (*FP_INTERNAL_FRAME_ENUMERATION_CALLBACK)(const DebuggerIPCE_STRData * pFrameData, CALLBACK_DATA pUserData);
virtual void EnumerateInternalFrames(VMPTR_Thread vmThread, FP_INTERNAL_FRAME_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0;

//
// Given the FramePointer of the parent frame and the FramePointer of the current frame,
// check if the current frame is the parent frame. fpParent should have been returned
// previously by the DacDbiInterface via GetStackWalkCurrentFrameInfo().
//
// Arguments:
// fpToCheck - the FramePointer of the current frame
// fpParent - the FramePointer of the parent frame; should have been returned earlier by the DDI
//
// Return Value:
// Return TRUE if the current frame is indeed the parent frame
//
// Note:
// Because of the complexity involved in checking for the parent frame, we should always
// ask the ExceptionTracker to do it.
//
virtual BOOL IsMatchingParentFrame(FramePointer fpToCheck, FramePointer fpParent) = 0;

//
// Return the stack parameter size of a given method. This is necessary on x86 for unwinding.
//
// Arguments:
// controlPC - any address in the specified method; you can use the current PC of the stack frame
//
// Return Value:
// Return the size of the stack parameters of the given method.
// Return 0 for vararg methods.
//
// Assumptions:
// The callee stack parameter size is constant throughout a method.
//
virtual ULONG32 GetStackParameterSize(CORDB_ADDRESS controlPC) = 0;

//
// Return the FramePointer of the current frame where the stackwalker is stopped at.
//
// Arguments:
// pSFIHandle - the handle to the stackwalker
//
// Return Value:
// the FramePointer of the current frame
//
// Notes:
// The FramePointer of a stack frame is:
// the stack address of the return address on x86,
// the current SP on AMD64,
//
// On x86, to get the stack address of the return address, we need to unwind one more frame
// and use the SP of the caller frame as the FramePointer of the callee frame. This
// function does NOT do that. It just returns the SP. The caller needs to handle the
// unwinding.
//
// The FramePointer of an explicit frame is just the stack address of the explicit frame.
//
virtual FramePointer GetFramePointer(StackWalkHandle pSFIHandle) = 0;

//
// Check whether the specified CONTEXT is the CONTEXT of the leaf frame. This function doesn't care
// whether the leaf frame is native or managed.
//
// Arguments:
// vmThread - the specified thread
// pContext - the CONTEXT to check
//
// Return Value:
// Return TRUE if the specified CONTEXT is the leaf CONTEXT.
//
// Notes:
// Currently we check the specified CONTEXT against the filter CONTEXT first.
// This will be deprecated in V3.
//
virtual BOOL IsLeafFrame(VMPTR_Thread vmThread, const DT_CONTEXT * pContext) = 0;

// Get the context for a particular thread of the target process.
// Arguments:
// input: vmThread - the thread for which the context is required
// output: pContextBuffer - the address of the CONTEXT to be initialized.
// The memory for this belongs to the caller. It must not be NULL.
// Note: throws
virtual void GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer) = 0;

//
// This is a simple helper function to convert a CONTEXT to a DebuggerREGDISPLAY. We need to do this
// inside DDI because the RS has no notion of REGDISPLAY.
//
// Arguments:
// pInContext - the CONTEXT to be converted
// pOutDRD - the converted DebuggerREGDISPLAY
// fActive - Indicates whether the CONTEXT is active or not. An active CONTEXT means that the
// IP is the next instruction to be executed, not the return address of a function call.
// The opposite of an active CONTEXT is an unwind CONTEXT, which is obtained from
// unwinding.
//
virtual void ConvertContextToDebuggerRegDisplay(const DT_CONTEXT * pInContext, DebuggerREGDISPLAY * pOutDRD, BOOL fActive) = 0;

typedef enum
{
    kNone,
    kILStub,
    kLCGMethod,
} DynamicMethodType;

//
// Check whether the specified method is an IL stub or an LCG method.
This answer determines if we
// need to expose the method in a V2-style stackwalk.
//
// Arguments:
// vmMethodDesc - the method to be checked
//
// Return Value:
// Return kNone if the method is neither an IL stub nor an LCG method.
// Return kILStub if the method is an IL stub.
// Return kLCGMethod if the method is an LCG method.
//
virtual DynamicMethodType IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc) = 0;

//
// Return a TargetBuffer for the raw vararg signature.
// Also return the address of the first argument in the vararg signature.
//
// Arguments:
// VASigCookieAddr - the target address of the VASigCookie pointer (double indirection)
// pArgBase - out parameter; return the target address of the first word of the arguments
//
// Return Value:
// Return a TargetBuffer for the raw vararg signature.
//
// Notes:
// We can't take a VMPTR here because VASigCookieAddr does not come from the DDI. Instead,
// we use the native variable information to figure out which stack slot contains the
// VASigCookie pointer. So a remote address is all we have got.
//
// Ideally we should be able to return just a SigParser, but doing so has a not-so-trivial problem.
// The memory used for the signature pointed to by the SigParser cannot be allocated in the DAC cache,
// since it'll be used by mscordbi. We don't have a clean way to allocate memory in mscordbi without
// breaking the Signature abstraction.
//
// The other option would be to create a new sub-type like "SignatureCopy" which allocates and frees
// its own backing memory. Currently we don't want to share heaps between mscordacwks.dll and
// mscordbi.dll, and so we would have to jump through some hoops to allocate with an allocator
// in mscordbi.dll.
//
virtual TargetBuffer GetVarArgSig(CORDB_ADDRESS VASigCookieAddr, OUT CORDB_ADDRESS * pArgBase) = 0;

//
// Indicates if the specified type requires 8-byte alignment.
//
// Arguments:
// thExact - the exact TypeHandle of the type to query
//
// Return Value:
// TRUE if the type requires 8-byte alignment.
//
virtual BOOL RequiresAlign8(VMPTR_TypeHandle thExact) = 0;

//
// Resolve the raw generics token to the real generics type token. The resolution is based on the
// given index. See Notes below.
//
// Arguments:
// dwExactGenericArgsTokenIndex - the variable index of the generics type token
// rawToken - the raw token to be resolved
//
// Return Value:
// Return the actual generics type token.
//
// Notes:
// DDI tells the RS which variable stores the generics type token, but DDI doesn't retrieve the value
// of the variable itself. Instead, the RS retrieves the value of the variable. However,
// in some cases, the variable value is not the generics type token. In this case, we need to
// "resolve" the variable value to the generics type token. The RS should call this API to do that.
//
// If the index is 0, then the generics type token is the MethodTable of the "this" object.
// rawToken will be the address of the "this" object.
//
// If the index is TYPECTXT_ILNUM, the generics type token is a secret argument.
// It could be a MethodDesc or a MethodTable, and in this case no resolution is actually necessary.
// rawToken will be the actual secret argument, and this API really is just a nop.
//
// However, we don't want the RS to know all this logic.
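//
// Example (illustrative sketch, not part of this interface): the intended
// division of labor. The RS reads the raw variable value itself, then hands
// it to the DDI to resolve; 'ReadTargetPointer' is a made-up helper standing
// in for whatever data-target read the RS uses.
//
// GENERICS_TYPE_TOKEN rawToken = ReadTargetPointer(addrOfGenericsTokenVariable);
// GENERICS_TYPE_TOKEN exactToken =
//     pDDI->ResolveExactGenericArgsToken(dwExactGenericArgsTokenIndex, rawToken);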
//
virtual GENERICS_TYPE_TOKEN ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex, GENERICS_TYPE_TOKEN rawToken) = 0;

//-----------------------------------------------------------------------------
// Functions to get information about code objects
//-----------------------------------------------------------------------------

// GetILCodeAndSig returns the function's ILCode and SigToken given
// a module and a token. The info will come from a MethodDesc, if
// one exists, or from metadata.
//
// Arguments:
// Input:
// vmDomainAssembly - module containing metadata for the method
// functionToken - metadata token for the function
// Output (required):
// pCodeInfo - start address and size of the IL
// pLocalSigToken - signature token for the method
virtual void GetILCodeAndSig(VMPTR_DomainAssembly vmDomainAssembly, mdToken functionToken, OUT TargetBuffer * pCodeInfo, OUT mdToken * pLocalSigToken) = 0;

// Gets information about a native code blob:
// its method desc, whether it's an instantiated generic, its EnC version number
// and hot and cold region information.
// Arguments:
// Input:
// vmDomainAssembly - module containing metadata for the method
// functionToken - token for the function for which we need code info
// Output (required):
// pCodeInfo - data structure describing the native code regions.
// Notes: If the function is unjitted, the method desc will be NULL and the
// output parameter will be invalid. In general, if the native start address
// is unavailable for any reason, the output parameter will also be
// invalid (i.e., pCodeInfo->IsValid is false).
virtual void GetNativeCodeInfo(VMPTR_DomainAssembly vmDomainAssembly, mdToken functionToken, OUT NativeCodeFunctionData * pCodeInfo) = 0;

// Gets information about a native code blob:
// its method desc, whether it's an instantiated generic, its EnC version number
// and hot and cold region information.
// This is similar to the function above, just works from a different starting point.
// Also this version can get info for any particular EnC version instance
// because they all have different start addresses whereas the above version gets
// the most recent one.
// Arguments:
// Input:
// hotCodeStartAddr - the beginning of the hot code region
// Output (required):
// pCodeInfo - data structure describing the native code regions.
virtual void GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS hotCodeStartAddr, NativeCodeFunctionData * pCodeInfo) = 0;
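// Example (illustrative sketch, not part of this interface): fetching a
// method's IL and reading it out of the target. 'DumpIL' and
// 'ReadTargetMemory' are made-up names, and the TargetBuffer field names
// used below are assumptions.
//
// void DumpIL(IDacDbiInterface * pDDI, VMPTR_DomainAssembly vmDomainAssembly, mdToken tkMethod)
// {
//     TargetBuffer ilCode;
//     mdToken tkLocalSig = mdTokenNil;
//     pDDI->GetILCodeAndSig(vmDomainAssembly, tkMethod, &ilCode, &tkLocalSig);
//     // ilCode now describes the IL blob in the target address space;
//     // read it with the caller's data target, e.g.:
//     // ReadTargetMemory(ilCode.pAddress, pLocalBuffer, ilCode.cbSize);
// }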
//-----------------------------------------------------------------------------
// Functions to get information about types
//-----------------------------------------------------------------------------

// Determine if a type is a ValueType
//
// Arguments:
// input: vmTypeHandle - the type being checked (works even on unrestored types)
//
// Return:
// TRUE iff the type is a ValueType
virtual BOOL IsValueType (VMPTR_TypeHandle th) = 0;

// Determine if a type has generic parameters
//
// Arguments:
// input: vmTypeHandle - the type being checked (works even on unrestored types)
//
// Return:
// TRUE iff the type has generic parameters
virtual BOOL HasTypeParams (VMPTR_TypeHandle th) = 0;

// Get type information for a class
//
// Arguments:
// input: vmAppDomain - appdomain where we will fetch field data for the type
// thExact - exact type handle for type
// output:
// pData - structure containing information about the class and its
// fields
virtual void GetClassInfo (VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle thExact, ClassInfo * pData) = 0;

// get field information and object size for an instantiated generic
//
// Arguments:
// input: vmDomainAssembly - module containing metadata for the type
// vmThExact - exact type handle for type (may be NULL)
// vmThApprox - approximate type handle for the type
// output:
// pFieldList - array of structures containing information about the fields. Clears any previous
// contents. Allocated and initialized by this function.
// pObjectSize - size of the instantiated object
//
virtual void GetInstantiationFieldInfo (VMPTR_DomainAssembly vmDomainAssembly, VMPTR_TypeHandle vmThExact, VMPTR_TypeHandle vmThApprox, OUT DacDbiArrayList<FieldData> * pFieldList, OUT SIZE_T * pObjectSize) = 0;

// use a type handle to get the information needed to create the corresponding RS CordbType instance
//
// Arguments:
// input: boxed - indicates what, if anything, is boxed. See code:AreValueTypesBoxed for more
// specific information
// vmAppDomain - module containing metadata for the type
// vmTypeHandle - type handle for the type
// output: pTypeInfo - holds information needed to build the corresponding CordbType
//
virtual void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0;

virtual void GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, CORDB_ADDRESS addr, OUT DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0;

virtual void GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, COR_TYPEID id, OUT DebuggerIPCE_ExpandedTypeData * pTypeInfo) = 0;

// Get type handle for a TypeDef token, if one exists. For generics this returns the open type.
// Note there is no guarantee the returned handle will be fully restored (in pre-jit scenarios),
// only that it exists.
Later functions that use this type handle should fail if they require
// information not yet available at the current restoration level
//
// Arguments:
// input: vmModule - the module scope in which to look up the type def
// metadataToken - the type definition to retrieve
//
// Return value: the type handle if it exists or throws CORDBG_E_CLASS_NOT_LOADED if it isn't loaded
//
virtual VMPTR_TypeHandle GetTypeHandle(VMPTR_Module vmModule, mdTypeDef metadataToken) = 0;

// Get the approximate type handle for an instantiated type. This may be identical to the exact type handle,
// but if we have code sharing for generics, it may differ in that it may have canonical type parameters.
// This will occur if we have not yet loaded an exact type but we have loaded the canonical form of the
// type.
//
// Arguments:
// input: pTypeData - information needed to get the type handle; this includes a list of type parameters
// and the number of entries in the list. Allocated and initialized by the caller.
// Return value: the approximate type handle
//
virtual VMPTR_TypeHandle GetApproxTypeHandle(TypeInfoList * pTypeData) = 0;

// Get the exact type handle from type data.
// Arguments:
// input: pTypeData - type information for the type. includes information about
// the top-level type as well as information
// about the element type for array types, the referent for
// pointer types, or actual parameters for generic class or
// valuetypes, as appropriate for the top-level type.
// pArgInfo - This is preallocated and initialized by the caller and contains two fields:
// genericArgsCount - number of type parameters (these may be actual type parameters
// for generics or they may represent the element type or referent
// type).
// pGenericArgData - list of type parameters
// vmTypeHandle - the exact type handle derived from the type information
// Return Value: an HRESULT indicating the result of the operation
virtual HRESULT GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData, ArgInfoList * pArgInfo, VMPTR_TypeHandle& vmTypeHandle) = 0;

//
// Retrieve the generic type params for a given MethodDesc. This function is specifically
// for stackwalking because it requires the generic type token on the stack.
//
// Arguments:
// vmAppDomain - the appdomain of the MethodDesc
// vmMethodDesc - the method in question
// genericsToken - the generic type token in the stack frame owned by the method
//
// pcGenericClassTypeParams - out parameter; returns the number of type parameters for the class
// containing the method in question; must not be NULL
// pGenericTypeParams - out parameter; returns an array of type parameters and
// the count of the total number of type parameters; must not be NULL
//
// Notes:
// The memory for the array is allocated by this function on the Dbi heap.
// The caller is responsible for releasing it.
//
virtual void GetMethodDescParams(VMPTR_AppDomain vmAppDomain, VMPTR_MethodDesc vmMethodDesc, GENERICS_TYPE_TOKEN genericsToken, OUT UINT32 * pcGenericClassTypeParams, OUT TypeParamsList * pGenericTypeParams) = 0;

// Get the target field address of a thread local static.
// Arguments:
// input: vmField - pointer to the field descriptor for the static field
// vmRuntimeThread - thread to which the static field belongs. This must
// NOT be NULL
// Return Value: The target address of the field if the field is allocated.
// NULL if the field storage is not yet allocated.
//
// Note:
// Static field storage is lazily allocated, so this may commonly return NULL.
// This is an inspection only method and can not allocate the static storage.
// Field storage is constant once allocated, so this value can be cached.
virtual CORDB_ADDRESS GetThreadStaticAddress(VMPTR_FieldDesc vmField, VMPTR_Thread vmRuntimeThread) = 0;

// Get the target field address of a collectible type's static.
// Arguments:
// input: vmField - pointer to the field descriptor for the static field
// vmAppDomain - AppDomain to which the static field belongs. This must
// NOT be NULL
// Return Value: The target address of the field if the field is allocated.
// NULL if the field storage is not yet allocated.
//
// Note:
// Static field storage may not exist yet, so this may commonly return NULL.
// This is an inspection only method and can not allocate the static storage.
// Field storage is not constant once allocated so this value can not be cached
// across a Continue
virtual CORDB_ADDRESS GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField, VMPTR_AppDomain vmAppDomain) = 0;

// Get information about a field added with Edit And Continue.
// Arguments:
// input: pEnCFieldInfo - information about the EnC added field including:
// object to which it belongs (if this is null the field is static)
// the field token
// the class token for the class to which the field was added
// the offset to the fields
// the domain file
// an indication of the type: whether it's a class or value type
// output: pFieldData - information about the EnC added field
// pfStatic - flag to indicate whether the field is static
virtual void GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo, OUT FieldData * pFieldData, OUT BOOL * pfStatic) = 0;

// GetTypeHandleParams gets the necessary data for a type handle, i.e. its
// type parameters, e.g. "String" and "List<int>" from the type handle
// for "Dict<String,List<int>>", and sends it back to the right side.
// Arguments:
// input: vmAppDomain - app domain to which the type belongs
// vmTypeHandle - type handle for the type
// output: pParams - list of instances of DebuggerIPCE_ExpandedTypeData,
// one for each type parameter. These will be used on the
// RS to build up an instantiation which will allow
// building an instance of CordbType for the top-level
// type. The memory for this list is allocated on the dbi
// heap in this function.
// This will not fail except for OOM
virtual void GetTypeHandleParams(VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, OUT TypeParamsList * pParams) = 0;

// GetSimpleType
// gets the metadata token and domain file corresponding to a simple type
// Arguments:
// input: vmAppDomain - Appdomain in which simpleType resides
// simpleType - CorElementType value corresponding to a simple type
// output: pMetadataToken - the metadata token corresponding to simpleType,
// in the scope of vmDomainAssembly.
// vmDomainAssembly - the domainAssembly for simpleType
// Notes:
// This is inspection-only. If the type is not yet loaded, it will throw CORDBG_E_CLASS_NOT_LOADED.
// It will not try to load a type.
// If the type has been loaded, vmDomainAssembly will be non-null unless the target is somehow corrupted.
// In that case, we will throw CORDBG_E_TARGET_INCONSISTENT.
virtual void GetSimpleType(VMPTR_AppDomain vmAppDomain, CorElementType simpleType, OUT mdTypeDef * pMetadataToken, OUT VMPTR_Module * pVmModule, OUT VMPTR_DomainAssembly * pVmDomainAssembly) = 0;
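// Example (illustrative sketch, not part of this interface): honoring the
// lazy-allocation rule for static field storage above. 'ShowThreadStatic',
// 'ReportValueAt' and 'ReportUnavailable' are made-up names.
//
// void ShowThreadStatic(IDacDbiInterface * pDDI, VMPTR_FieldDesc vmField, VMPTR_Thread vmThread)
// {
//     CORDB_ADDRESS fieldAddr = pDDI->GetThreadStaticAddress(vmField, vmThread);
//     if (fieldAddr != NULL)
//     {
//         // Storage exists; per the notes above it is constant once allocated,
//         // so this address may be cached.
//         ReportValueAt(fieldAddr);
//     }
//     else
//     {
//         ReportUnavailable(); // storage not yet allocated
//     }
// }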
// for the specified object returns TRUE if the object derives from System.Exception
virtual BOOL IsExceptionObject(VMPTR_Object vmObject) = 0;

// gets the list of raw stack frames for the specified exception object
virtual void GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData>& dacStackFrames) = 0;

// Returns true if the argument is a runtime callable wrapper
virtual BOOL IsRcw(VMPTR_Object vmObject) = 0;

// retrieves the list of COM interfaces implemented by vmObject, as it is known at
// the time of the call (the list may change as new interface types become available
// in the runtime)
virtual void GetRcwCachedInterfaceTypes(
    VMPTR_Object vmObject,
    VMPTR_AppDomain vmAppDomain,
    BOOL bIInspectableOnly,
    OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces) = 0;

// retrieves the list of interface pointers implemented by vmObject, as it is known at
// the time of the call (the list may change as new interface types become available
// in the runtime)
virtual void GetRcwCachedInterfacePointers(
    VMPTR_Object vmObject,
    BOOL bIInspectableOnly,
    OUT DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs) = 0;

// retrieves a list of interface types corresponding to the passed in
// list of IIDs. The interface types are retrieved from an app domain
// IID / Type cache that is updated as new types are loaded. The result will
// have NULL entries corresponding to unknown IIDs in "iids"
virtual void GetCachedWinRTTypesForIIDs(
    VMPTR_AppDomain vmAppDomain,
    DacDbiArrayList<GUID> & iids,
    OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes) = 0;

// retrieves the whole app domain cache of IID / Type mappings.
virtual void GetCachedWinRTTypes(
    VMPTR_AppDomain vmAppDomain,
    OUT DacDbiArrayList<GUID> * piids,
    OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes) = 0;

// ----------------------------------------------------------------------------
// functions to get information about reference/handle referents for ICDValue
// ----------------------------------------------------------------------------

// Get object information for a TypedByRef object. Initializes the objRef and typedByRefType fields of
// pObjectData (type info for the referent).
// Arguments:
// input: pTypedByRef - pointer to a TypedByRef struct
// vmAppDomain - AppDomain for the type of the object referenced
// output: pObjectData - information about the object referenced by pTypedByRef
// Note: Throws
virtual void GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData) = 0;

// Get the string length and offset to string base for a string object
// Arguments:
// input: objectAddress - address of a string object
// output: pObjectData - fills in the string fields stringInfo.offsetToStringBase and
// stringInfo.length
// Note: throws
virtual void GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData) = 0;

// Get information for an array type referent of an objRef, including rank, upper and lower bounds,
// element size and type, and the number of elements.
// Arguments:
// input: objectAddress - the address of an array object
// output: pObjectData - fills in the array-related fields:
// arrayInfo.offsetToArrayBase,
// arrayInfo.offsetToLowerBounds,
// arrayInfo.offsetToUpperBounds,
// arrayInfo.componentCount,
// arrayInfo.rank,
// arrayInfo.elementSize,
// Note: throws
virtual void GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData) = 0;

// Get information about an object for which we have a reference, including the object size and
// type information.
// Arguments:
// input: objectAddress - address of the object for which we want information
// type - the basic type of the object (we may find more specific type
// information for the object)
// vmAppDomain - the appdomain to which the object belongs
// output: pObjectData - fills in the size and type information fields
// Note: throws
virtual void GetBasicObjectInfo(CORDB_ADDRESS objectAddress, CorElementType type, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData) = 0;

// --------------------------------------------------------------------------------------------
#ifdef TEST_DATA_CONSISTENCY
// Determine whether a crst is held by the left side. When the DAC is executing VM code that takes a
// lock, we want to know whether the LS already holds that lock. If it does, we will assume the locked
// data is in an inconsistent state and will throw an exception, rather than relying on this data. This
// function is part of a self-test that will ensure we are correctly detecting when the LS holds a lock
// on data the RS is trying to inspect.
// Argument:
// input: vmCrst - the lock to test
// output: none
// Notes:
// Throws
// For this code to run, the environment variable TestDataConsistency must be set to 1.
virtual void TestCrst(VMPTR_Crst vmCrst) = 0;

// Determine whether a reader-writer lock is held by the left side. When the DAC is executing VM code that takes a
// lock, we want to know whether the LS already holds that lock. If it does, we will assume the locked
// data is in an inconsistent state and will throw an exception, rather than relying on this data. This
// function is part of a self-test that will ensure we are correctly detecting when the LS holds a lock
// on data the RS is trying to inspect.
// Argument:
// input: vmRWLock - the lock to test
// output: none
// Notes:
// Throws
// For this code to run, the environment variable TestDataConsistency must be set to 1.
virtual void TestRWLock(VMPTR_SimpleRWLock vmRWLock) = 0;
#endif
// --------------------------------------------------------------------------------------------

// Get the address of the Debugger control block on the helper thread. The debugger control block
// contains information about the status of the debugger, handles to various events and space to hold
// information sent back and forth between the debugger and the debuggee's helper thread.
// Arguments: none
// Return Value: The remote address of the Debugger control block allocated on the helper thread
// if it has been successfully allocated or NULL otherwise.
virtual CORDB_ADDRESS GetDebuggerControlBlockAddress() = 0;

// Creates a VMPTR of an Object. The Object is found by dereferencing ptr
// as though it is a target address to an OBJECTREF. This is similar to
// GetObject with another level of indirection.
//
// Arguments:
// ptr - A target address pointing to an OBJECTREF
//
// Return Value:
// A VMPTR to the Object which ptr points to
//
// Notes:
// The VMPTR this produces can be deconstructed by GetObjectContents.
// This function will throw if given a NULL or otherwise invalid pointer,
// but if given a valid address to an invalid pointer, it will produce
// a VMPTR_Object which points to invalid memory.
virtual VMPTR_Object GetObjectFromRefPtr(CORDB_ADDRESS ptr) = 0;
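// Example (illustrative sketch, not part of this interface): the extra level
// of indirection distinguishing the two lookups. Given the remote address of
// a slot containing an OBJECTREF (e.g. a field or local), use
// GetObjectFromRefPtr; given the object's own address, use GetObject,
// declared just below.
//
// // refSlotAddr holds the address of a slot containing an OBJECTREF:
// VMPTR_Object vmObjFromSlot = pDDI->GetObjectFromRefPtr(refSlotAddr);
//
// // objAddr is already the object's address in the target:
// VMPTR_Object vmObj = pDDI->GetObject(objAddr);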
// Creates a VMPTR of an Object. The Object is assumed to be at the target
// address supplied by ptr
//
// Arguments:
// ptr - A target address to an Object
//
// Return Value:
// A VMPTR to the Object which was at ptr
//
// Notes:
// The VMPTR this produces can be deconstructed by GetObjectContents.
// This will produce a VMPTR_Object regardless of whether the pointer is
// valid or not.
virtual VMPTR_Object GetObject(CORDB_ADDRESS ptr) = 0;

// Sets state in the native binder.
//
// Arguments:
// ePolicy - the NGEN policy to change
//
// Return Value:
// HRESULT indicating if the state was successfully updated
//
virtual HRESULT EnableNGENPolicy(CorDebugNGENPolicy ePolicy) = 0;

// Sets the NGEN compiler flags. This restricts NGEN to only use images with certain
// types of pregenerated code. With respect to debugging this is used to specify that
// the NGEN image must be debuggable aka non-optimized code. Note that these flags
// are merged with other sources of configuration so it is possible that the final
// result retrieved from GetDesiredNGENCompilerFlags does not match what was specified
// in this call.
//
// If an NGEN image of the appropriate type isn't available then one of two things happens:
// a) the NGEN image isn't loaded and CLR loads the MSIL image instead
// b) the NGEN image is loaded, but we don't use the pregenerated code it contains
// and instead use only the MSIL and metadata
//
// This function is only legal to call at app startup before any decisions have been
// made about NGEN image loading. Once we begin loading, this configuration is immutable.
//
//
// Arguments:
// dwFlags - the new NGEN compiler flags that should go into effect
//
// Return Value:
// HRESULT indicating if the state was successfully updated. On error the
// current flags in effect will not have changed.
//
virtual HRESULT SetNGENCompilerFlags(DWORD dwFlags) = 0;

// Gets the NGEN compiler flags currently in effect. This accounts for settings that
// were caused by SetDesiredNGENCompilerFlags as well as other configuration sources.
// See SetDesiredNGENCompilerFlags for more info
//
// Arguments:
// pdwFlags - the NGEN compiler flags currently in effect
//
// Return Value:
// HRESULT indicating if the state was successfully retrieved.
//
virtual HRESULT GetNGENCompilerFlags(DWORD *pdwFlags) = 0;

// Create a VMPTR_OBJECTHANDLE from a CORDB_ADDRESS pointing to an object handle
//
// Arguments:
// handle: target address of a GC handle
//
// Return Value:
// returns a VMPTR_OBJECTHANDLE with the handle as the m_addr field
//
// Notes:
// This will produce a VMPTR_OBJECTHANDLE regardless of whether handle is
// valid.
// Ideally we'd be using only strongly-typed variables on the RS, and then this would be unnecessary
virtual VMPTR_OBJECTHANDLE GetVmObjectHandle(CORDB_ADDRESS handleAddress) = 0;

// Validate that the VMPTR_OBJECTHANDLE refers to a legitimate managed object
//
// Arguments:
// handle: the GC handle to be validated
//
// Return value:
// TRUE if the object appears to be valid (it's a heuristic), FALSE if it definitely is not valid
//
virtual BOOL IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle) = 0;

// indicates if the specified module is a WinRT module
//
// Arguments:
// vmModule: the module to check
// isWinRT: out parameter indicating state of module
//
// Return value:
// S_OK indicating that the operation succeeded
//
virtual HRESULT IsWinRTModule(VMPTR_Module vmModule, BOOL& isWinRT) = 0;

// Determines the app domain id for the object referred to by a given VMPTR_OBJECTHANDLE
//
// Arguments:
// handle: the GC handle which refers to the object of interest
//
// Return value:
// The app domain id of the object of interest
//
// This may throw if the object handle is corrupt (it doesn't refer to a managed object)
virtual ULONG GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle) = 0;

// Get the target address from a VMPTR_OBJECTHANDLE, i.e., the handle address
// Arguments:
// vmHandle - (input) the VMPTR_OBJECTHANDLE from which we need the target address
// Return value: the target address from the VMPTR_OBJECTHANDLE
//
virtual CORDB_ADDRESS GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle) = 0;

// Given a VMPTR to an Object return the target address
//
// Arguments:
// obj - the Object VMPTR to get the address from
//
// Return Value:
// Return the target address which obj is using
//
// Notes:
// The VMPTR this consumes can be reconstructed using GetObject and
// providing the address stored in the returned TargetBuffer. This has
// undefined behavior for invalid VMPTR_Objects.
virtual TargetBuffer GetObjectContents(VMPTR_Object obj) = 0;

// The callback used to enumerate blocking objects
typedef void (*FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK)(DacBlockingObject blockingObject, CALLBACK_DATA pUserData);

//
// Enumerate all monitors blocking a thread
//
// Arguments:
// vmThread - the thread to get monitor data for
// fpCallback - callback to invoke on the blocking data for each monitor
// pUserData - user data to supply for each callback.
//
// Return Value:
// Returns on success. Throws on error.
//
//
virtual void EnumerateBlockingObjects(VMPTR_Thread vmThread, FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0;

//
// Returns the thread which owns the monitor lock on an object and the acquisition
// count
//
// Arguments:
// vmObject - The object to check for ownership
//
// Return Value:
// Throws on error. Inside the structure we have:
// pVmThread - the owning thread, or VMPTR_Thread::NullPtr() if unowned
// pAcquisitionCount - the number of times the lock would need to be released in
// order for it to be unowned
//
virtual MonitorLockInfo GetThreadOwningMonitorLock(VMPTR_Object vmObject) = 0;

//
// Enumerate all threads waiting on the monitor event for an object
//
// Arguments:
// vmObject - the object whose monitor event we are interested in
// fpCallback - callback to invoke on each thread in the queue
// pUserData - user data to supply for each callback.
//
// Return Value:
// Returns on success. Throws on error.
//
//
virtual void EnumerateMonitorEventWaitList(VMPTR_Object vmObject, FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData) = 0;
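// Example (illustrative sketch, not part of this interface): combining the
// monitor queries above. 'RecordBlockingObject' and 'InspectMonitor' are
// made-up names, and the MonitorLockInfo field names are assumptions.
//
// static void BlockingCallback(DacBlockingObject blockingObject, CALLBACK_DATA pUserData)
// {
//     // Invoked once per monitor the thread is blocked on.
//     // RecordBlockingObject(blockingObject);
// }
//
// void InspectMonitor(IDacDbiInterface * pDDI, VMPTR_Thread vmThread, VMPTR_Object vmMonitorObj)
// {
//     pDDI->EnumerateBlockingObjects(vmThread, BlockingCallback, NULL);
//
//     // Who owns this monitor, and how many releases until it is unowned?
//     MonitorLockInfo info = pDDI->GetThreadOwningMonitorLock(vmMonitorObj);
//     // ... report info's owning thread and acquisition count ...
// }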
//
// Returns the managed debugging flags for the process (a combination
// of the CLR_DEBUGGING_PROCESS_FLAGS flags). This function specifies,
// beyond whether or not a managed debug event is pending, also if the
// event (if one exists) is caused by a Debugger.Launch(). This is
// important because Debugger.Launch calls should *NOT* cause the debugger
// to terminate the process when the attach is canceled.
virtual CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags() = 0;

virtual bool GetMetaDataFileInfoFromPEFile(VMPTR_PEAssembly vmPEAssembly, DWORD & dwTimeStamp, DWORD & dwImageSize, bool & isNGEN, IStringHolder* pStrFilename) = 0;

virtual bool GetILImageInfoFromNgenPEFile(VMPTR_PEAssembly vmPEAssembly, DWORD & dwTimeStamp, DWORD & dwSize, IStringHolder* pStrFilename) = 0;

virtual bool IsThreadSuspendedOrHijacked(VMPTR_Thread vmThread) = 0;

typedef void* * HeapWalkHandle;

// Returns true if it is safe to walk the heap. If this function returns false,
// you could still create a heap walk and attempt to walk it, but there's no
// telling how much of the heap will be available.
virtual bool AreGCStructuresValid() = 0;

// Creates a HeapWalkHandle which can be used to walk the managed heap with the
// WalkHeap function. Note if this function completes successfully you will need
// to delete the handle by passing it into DeleteHeapWalk.
//
// Arguments:
// pHandle - the location to store the heap walk handle in
//
// Returns:
// S_OK on success, an error code on failure.
virtual HRESULT CreateHeapWalk(OUT HeapWalkHandle * pHandle) = 0;

// Deletes the given HeapWalkHandle. Note you must call this function if
// CreateHeapWalk returns success.
virtual void DeleteHeapWalk(HeapWalkHandle handle) = 0;

// Walks the heap using the given heap walk handle, enumerating objects
// on the managed heap. Note that walking the heap requires that the GC
// data structures be in a valid state, which you can find by calling
// AreGCStructuresValid.
//
// Arguments:
// handle - a HeapWalkHandle obtained from CreateHeapWalk
// count - the number of object addresses to obtain; objects must
// be at least as large as count
// objects - the location to stuff the object addresses found during
// the heap walk; this array should be at least "count" in
// length; this field must not be null
// pFetched - a location to store the actual number of values filled
// into objects; this field must not be null
//
// Returns:
// S_OK on success, a failure HRESULT otherwise.
//
// Note:
// You should iteratively call WalkHeap requesting more values until
// *pFetched != count. This signifies that we have reached the end
// of the heap walk.
virtual HRESULT WalkHeap(HeapWalkHandle handle, ULONG count, OUT COR_HEAPOBJECT * objects, OUT ULONG * pFetched) = 0;

virtual HRESULT GetHeapSegments(OUT DacDbiArrayList<COR_SEGMENT> * pSegments) = 0;

virtual bool IsValidObject(CORDB_ADDRESS obj) = 0;

virtual bool GetAppDomainForObject(CORDB_ADDRESS obj, OUT VMPTR_AppDomain * pApp, OUT VMPTR_Module * pModule, OUT VMPTR_DomainAssembly * pDomainAssembly) = 0;
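// Example (illustrative sketch, not part of this interface): the heap-walk
// loop described in the notes above. 'WalkManagedHeap' and 'VisitObject' are
// made-up names and error handling is minimal.
//
// void WalkManagedHeap(IDacDbiInterface * pDDI)
// {
//     if (!pDDI->AreGCStructuresValid())
//         return; // heap may be in an inconsistent state; results unreliable
//
//     IDacDbiInterface::HeapWalkHandle hWalk = NULL;
//     if (FAILED(pDDI->CreateHeapWalk(&hWalk)))
//         return;
//
//     COR_HEAPOBJECT objects[32];
//     ULONG fetched = 0;
//     do
//     {
//         if (FAILED(pDDI->WalkHeap(hWalk, 32, objects, &fetched)))
//             break;
//         for (ULONG i = 0; i < fetched; i++)
//         {
//             // VisitObject(objects[i]);
//         }
//     } while (fetched == 32); // *pFetched != count signals the end of the walk
//
//     pDDI->DeleteHeapWalk(hWalk); // required after a successful CreateHeapWalk
// }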
// Reference Walking.

// Creates a reference walk.
// Parameters:
// pHandle - out - the reference walk handle to create
// walkStacks - in - whether or not to report stack references
// walkFQ - in - whether or not to report references from the finalizer queue
// handleWalkMask - in - the types of handles to report (see CorGCReferenceType, cordebug.idl)
// Returns:
// An HRESULT indicating whether it succeeded or failed.
// Exceptions:
// Does not throw, but does not catch exceptions either.
virtual HRESULT CreateRefWalk(OUT RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask) = 0;

// Deletes a reference walk.
// Parameters:
// handle - in - the handle of the reference walk to delete
// Exceptions:
// Does not throw, but does not catch exceptions either.
virtual void DeleteRefWalk(RefWalkHandle handle) = 0;

// Enumerates GC references in the process based on the parameters passed to CreateRefWalk.
// Parameters:
// handle - in - the RefWalkHandle to enumerate
// count - in - the capacity of "refs"
// refs - in/out - an array to write the references to
// pFetched - out - the number of references written
virtual HRESULT WalkRefs(RefWalkHandle handle, ULONG count, OUT DacGcReference * refs, OUT ULONG * pFetched) = 0;

virtual HRESULT GetTypeID(CORDB_ADDRESS obj, COR_TYPEID * pType) = 0;

virtual HRESULT GetTypeIDForType(VMPTR_TypeHandle vmTypeHandle, COR_TYPEID *pId) = 0;

virtual HRESULT GetObjectFields(COR_TYPEID id, ULONG32 celt, OUT COR_FIELD * layout, OUT ULONG32 * pceltFetched) = 0;

virtual HRESULT GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT * pLayout) = 0;

virtual HRESULT GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT * pLayout) = 0;

virtual void GetGCHeapInformation(OUT COR_HEAPINFO * pHeapInfo) = 0;

// If a PEAssembly has an RW capable IMDInternalImport, this returns the address of the MDInternalRW
// object which implements it.
//
//
// Arguments:
// vmPEAssembly - target PEAssembly to get metadata MDInternalRW for.
// pAddrMDInternalRW - If a PEAssembly has an RW capable IMDInternalImport, this will be set to the address
// of the MDInternalRW object which implements it. Otherwise it will be NULL.
//
virtual HRESULT GetPEFileMDInternalRW(VMPTR_PEAssembly vmPEAssembly, OUT TADDR* pAddrMDInternalRW) = 0;

// DEPRECATED - use GetActiveRejitILCodeVersionNode
// Retrieves the active ReJitInfo for a given module/methodDef, if it exists.
// Active is defined as after GetReJitParameters returns from the profiler dll and
// no call to Revert has completed yet.
//
//
// Arguments:
// vmModule - The module to search in
// methodTk - The methodDef token indicates the method within the module to check
// pReJitInfo - [out] The RejitInfo request, if any, that is active on this method. If no request
// is active this will be pReJitInfo->IsNull() == TRUE.
//
// Returns:
// S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
// error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
//
virtual HRESULT GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pReJitInfo) = 0;

// DEPRECATED - use GetNativeCodeVersionNode
// Retrieves the ReJitInfo for a given MethodDesc/code address, if it exists.
//
//
// Arguments:
// vmMethod - The method to look for
// codeStartAddress - The code start address disambiguates between multiple rejitted instances
// of the method.
// pReJitInfo - [out] The RejitInfo request that corresponds to this MethodDesc/code address, if it exists.
// NULL otherwise.
// // Returns: // S_OK regardless of whether a rejit request is active or not, as long as the answer is certain // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pReJitInfo) = 0; // DEPRECATED - use GetILCodeVersion // Retrieves the SharedReJitInfo for a given ReJitInfo. // // // Arguments: // vmReJitInfo - The ReJitInfo to inspect // pSharedReJitInfo - [out] The SharedReJitInfo that is pointed to by vmReJitInfo. // // Returns: // S_OK if no error // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, VMPTR_SharedReJitInfo* pSharedReJitInfo) = 0; // DEPRECATED - use GetILCodeVersionData // Retrieves useful data from a SharedReJitInfo such as IL code and IL mapping. // // // Arguments: // sharedReJitInfo - The SharedReJitInfo to inspect // pData - [out] Various properties of the SharedReJitInfo such as IL code and IL mapping. // // Returns: // S_OK if no error // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetSharedReJitInfoData(VMPTR_SharedReJitInfo sharedReJitInfo, DacSharedReJitInfo* pData) = 0; // Retrieves a bit field indicating which defines were in use when clr was built. This only includes // defines that are specified in the Debugger::_Target_Defines enumeration, which is a small subset of // all defines. // // // Arguments: // pDefines - [out] The set of defines clr.dll was built with. Bit offsets are encoded using the // enumeration Debugger::_Target_Defines // // Returns: // S_OK if no error // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetDefinesBitField(ULONG32 *pDefines) = 0; // Retrieves a version number indicating the shape of the data structures used in the Metadata implementation // inside clr.dll. This number changes anytime a datatype layout changes so that they can be correctly // deserialized from out of process // // // Arguments: // pMDStructuresVersion - [out] The layout version number for metadata data structures. See // Debugger::Debugger() in Debug\ee\Debugger.cpp for a description of the options. // // Returns: // S_OK if no error // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetMDStructuresVersion(ULONG32* pMDStructuresVersion) = 0; // Retrieves the active rejit ILCodeVersionNode for a given module/methodDef, if it exists. // Active is defined as after GetReJitParameters returns from the profiler dll and // no call to Revert has completed yet. // // // Arguments: // vmModule - The module to search in // methodTk - The methodDef token indicates the method within the module to check // pILCodeVersionNode - [out] The Rejit request, if any, that is active on this method. If no request // is active this will be pILCodeVersionNode->IsNull() == TRUE. // // Returns: // S_OK regardless of whether a rejit request is active or not, as long as the answer is certain // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible // virtual HRESULT GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0; // Retrieves the NativeCodeVersionNode for a given MethodDesc/code address, if it exists. // NOTE: The initial (default) code generated for a MethodDesc is a valid MethodDesc/code address pair but it won't have a corresponding // NativeCodeVersionNode. 
//
//
// Arguments:
//    vmMethod - The method to look for
//    codeStartAddress - The code start address disambiguates between multiple jitted instances of the method.
//    pVmNativeCodeVersionNode - [out] The NativeCodeVersionNode request that corresponds to this MethodDesc/code address, if it exists.
//                               NULL otherwise.
//
// Returns:
//    S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
//    error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
//
virtual HRESULT GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode) = 0;

// Retrieves the ILCodeVersionNode for a given NativeCodeVersionNode.
// This may return a NULL node if the native code belongs to the default IL version for this method.
//
//
// Arguments:
//    vmNativeCodeVersionNode - The NativeCodeVersionNode to inspect
//    pVmILCodeVersionNode - [out] The ILCodeVersionNode that is pointed to by vmNativeCodeVersionNode, if any.
//
// Returns:
//    S_OK if no error
//    error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
//
virtual HRESULT GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0;

// Retrieves useful data from an ILCodeVersion such as IL code and IL mapping.
//
//
// Arguments:
//    ilCodeVersionNode - The ILCodeVersionNode to inspect
//    pData - [out] Various properties of the ILCodeVersionNode such as IL code and IL mapping.
//
// Returns:
//    S_OK if no error
//    error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
//
virtual HRESULT GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode ilCodeVersionNode, DacSharedReJitInfo* pData) = 0;

// Enable or disable the GC notification events. The GC notification events are turned off by default.
// They will be delivered through ICorDebugManagedCallback4.
//
//
// Arguments:
//    fEnable - true to enable the events, false to disable
//
// Returns:
//    S_OK if no error
//    error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
//
virtual HRESULT EnableGCNotificationEvents(BOOL fEnable) = 0;

typedef enum
{
    kClosedDelegate,
    kOpenDelegate,
    kOpenInstanceVSD,
    kClosedStaticWithScpecialSig,
    kTrueMulticastDelegate,
    kWrapperDelegate,
    kUnmanagedFunctionDelegate,
    kUnknownDelegateType
} DelegateType;

// Returns true if the object is a type deriving from System.MulticastDelegate
//
// Arguments:
//    vmObject - pointer to runtime object to query for.
//
virtual BOOL IsDelegate(VMPTR_Object vmObject) = 0;

// Returns the delegate type
virtual HRESULT GetDelegateType(VMPTR_Object delegateObject, DelegateType *delegateType) = 0;

virtual HRESULT GetDelegateFunctionData(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_DomainAssembly *ppFunctionDomainAssembly,
    OUT mdMethodDef *pMethodDef) = 0;

virtual HRESULT GetDelegateTargetObject(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_Object *ppTargetObj,
    OUT VMPTR_AppDomain *ppTargetAppDomain) = 0;

virtual HRESULT GetLoaderHeapMemoryRanges(OUT DacDbiArrayList<COR_MEMORY_RANGE> *pRanges) = 0;

virtual HRESULT IsModuleMapped(VMPTR_Module pModule, OUT BOOL *isModuleMapped) = 0;

virtual bool MetadataUpdatesApplied() = 0;

// The following tag tells the DD-marshalling tool to stop scanning.
// END_MARSHAL

//-----------------------------------------------------------------------------
// Utility interface used for passing strings out of these APIs.
// The caller
// provides an implementation of this that uses whatever memory allocation
// strategy it desires, and IDacDbiInterface APIs will call AssignCopy in order
// to pass back the contents of strings.
//
// This permits the client and implementation of IDacDbiInterface to be in
// different DLLs with their own heap allocation mechanism, while avoiding
// the ugly and verbose 2-call C-style string passing API pattern.
//-----------------------------------------------------------------------------
class IStringHolder
{
public:
    //
    // Store a copy of the provided string.
    //
    // Arguments:
    //    psz - The null-terminated unicode string to copy.
    //
    // Return Value:
    //    S_OK on success, typical HRESULT return values on failure.
    //
    // Notes:
    //    The underlying object is responsible for allocating and freeing the
    //    memory for this copy. The object must not store the value of psz;
    //    it is no longer valid after this call returns.
    //
    virtual HRESULT AssignCopy(const WCHAR * psz) = 0;
};

//-----------------------------------------------------------------------------
// Interface for allocations
// This lets DD allocate buffers to pass back to DBI, and thus avoids
// the common 2-step (query size/allocate/query data) pattern.
//
// Note that mscordacwks.dll and clients cannot share the same heap allocator;
// DAC statically links the CRT to avoid run-time dependencies on non-OS libraries.
//-----------------------------------------------------------------------------
class IAllocator
{
public:
    // Allocate
    // Expected to throw on error.
    virtual void * Alloc(SIZE_T lenBytes) = 0;

    // Free. This shouldn't throw.
    virtual void Free(void * p) = 0;
};

//-----------------------------------------------------------------------------
// Callback interface to provide Metadata lookup.
//-----------------------------------------------------------------------------
class IMetaDataLookup
{
public:
    //
    // Lookup a metadata importer via PEAssembly.
    //
    // Returns:
    //    An IMDInternalImport used by dac-ized VM code. The object is NOT addref-ed. See lifespan notes below.
    //    Returns NULL if no importer is available.
    //    Throws on exceptional circumstances (e.g., detects the debuggee is corrupted).
    //
    // Notes:
    //    IMDInternalImport is a property of PEAssembly. The DAC-ized code uses it as a weak reference,
    //    and so we avoid doing an AddRef() here because that would mean we need to add Release() calls
    //    in DAC-only paths.
    //    The metadata importers are not DAC-ized, and thus we have a local copy in the host.
    //    If it was dac-ized, then DAC would get the importer just like any other field.
    //
    //    lifespan of returned object:
    //    - DBI owns the metadata importers.
    //    - DBI must not free the importer without calling Flush() on DAC first.
    //    - DAC will only invoke this when in a DD primitive, which was in turn invoked by DBI.
    //    - For performance reasons, we want to allow DAC to cache this between Flush() calls.
    //    - If DAC caches the importer, it will only use it when DBI invokes a DD primitive.
    //    - the reference count of the returned object is not adjusted.
    //
    virtual IMDInternalImport * LookupMetaData(VMPTR_PEAssembly addressPEAssembly, bool &isILMetaDataForNGENImage) = 0;
};

}; // end IDacDbiInterface

#endif // _DACDBI_INTERFACE_H_
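//-----------------------------------------------------------------------------
// Illustrative only -- not part of the interface above, and deliberately left
// commented out. A minimal sketch of how a DBI-side client might consume the
// heap-walk API, following the contract documented on WalkHeap (keep
// requesting batches until fewer objects come back than were asked for).
// The function name, batch size, and error handling here are hypothetical.
//-----------------------------------------------------------------------------
//
// static HRESULT EnumerateAllHeapObjects(IDacDbiInterface * pDacDbi)
// {
//     // A full walk is only guaranteed when the GC data structures are valid.
//     if (!pDacDbi->AreGCStructuresValid())
//         return E_FAIL;
//
//     IDacDbiInterface::HeapWalkHandle handle;
//     HRESULT hr = pDacDbi->CreateHeapWalk(&handle);
//     if (FAILED(hr))
//         return hr;
//
//     const ULONG kBatchSize = 64;
//     COR_HEAPOBJECT objects[kBatchSize];
//     ULONG fetched = 0;
//     do
//     {
//         hr = pDacDbi->WalkHeap(handle, kBatchSize, objects, &fetched);
//         if (FAILED(hr))
//             break;
//         for (ULONG i = 0; i < fetched; i++)
//         {
//             // ... inspect objects[i].address, objects[i].size, etc. ...
//         }
//     } while (fetched == kBatchSize); // *pFetched != count => end of the walk
//
//     // CreateHeapWalk succeeded, so the handle must be deleted.
//     pDacDbi->DeleteHeapWalk(handle);
//     return hr;
// }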
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/jit/lower.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               Lower                                       XX
XX                                                                           XX
XX  Preconditions:                                                           XX
XX                                                                           XX
XX  Postconditions (for the nodes currently handled):                        XX
XX    - All operands requiring a register are explicit in the graph         XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif

#include "lower.h"

#if !defined(TARGET_64BIT)
#include "decomposelongs.h"
#endif // !defined(TARGET_64BIT)

//------------------------------------------------------------------------
// MakeSrcContained: Make "childNode" a contained node
//
// Arguments:
//    parentNode - is a non-leaf node that can contain its 'childNode'
//    childNode  - is an op that will now be contained by its parent.
//
// Notes:
//    If 'childNode' has any existing sources, they will now be sources for the parent.
//
void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const
{
    assert(!parentNode->OperIsLeaf());
    assert(childNode->canBeContained());
    childNode->SetContained();
    assert(childNode->isContained());

#ifdef DEBUG
    if (IsContainableMemoryOp(childNode))
    {
        // Verify caller of this method checked safety.
        //
        const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode);

        if (!isSafeToContainMem)
        {
            JITDUMP("** Unsafe mem containment of [%06u] in [%06u]\n", comp->dspTreeID(childNode),
                    comp->dspTreeID(parentNode));
            assert(isSafeToContainMem);
        }
    }
#endif
}

//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
//    and, if so, makes it contained.
//
// Arguments:
//    parentNode - is any non-leaf node
//    childNode  - is an child op of 'parentNode'
//
// Return value:
//     true if we are able to make childNode a contained immediate
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
{
    assert(!parentNode->OperIsLeaf());
    // If childNode is a containable immediate
    if (IsContainableImmed(parentNode, childNode))
    {
        // then make it contained within the parentNode
        MakeSrcContained(parentNode, childNode);
        return true;
    }
    return false;
}

//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
// and returns 'true' iff memory operand childNode can be contained in parentNode.
//
// Arguments:
//    parentNode - any non-leaf node
//    childNode  - some node that is an input to `parentNode`
//
// Return value:
//    true if it is safe to make childNode a contained memory operand.
// bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const { // Quick early-out for unary cases // if (childNode->gtNext == parentNode) { return true; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext) { const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode // and returns 'true' iff memory operand childNode can be contained in ancestorNode // // Arguments: // grandParentNode - any non-leaf node // parentNode - parent of `childNode` and an input to `grandParentNode` // childNode - some node that is an input to `parentNode` // // Return value: // true if it is safe to make childNode a contained memory operand. // bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const { m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext) { if (node == parentNode) { continue; } const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // LowerNode: this is the main entry point for Lowering. // // Arguments: // node - the node we are lowering. // // Returns: // next node in the transformed node sequence that needs to be lowered. // GenTree* Lowering::LowerNode(GenTree* node) { assert(node != nullptr); switch (node->gtOper) { case GT_NULLCHECK: case GT_IND: LowerIndir(node->AsIndir()); break; case GT_STOREIND: LowerStoreIndirCommon(node->AsStoreInd()); break; case GT_ADD: { GenTree* next = LowerAdd(node->AsOp()); if (next != nullptr) { return next; } } break; #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_SUB: case GT_AND: case GT_OR: case GT_XOR: return LowerBinaryArithmetic(node->AsOp()); case GT_MUL: case GT_MULHI: #if defined(TARGET_X86) || defined(TARGET_ARM64) case GT_MUL_LONG: #endif return LowerMul(node->AsOp()); case GT_UDIV: case GT_UMOD: if (!LowerUnsignedDivOrMod(node->AsOp())) { ContainCheckDivOrMod(node->AsOp()); } break; case GT_DIV: case GT_MOD: return LowerSignedDivOrMod(node); case GT_SWITCH: return LowerSwitch(node); case GT_CALL: LowerCall(node); break; case GT_LT: case GT_LE: case GT_GT: case GT_GE: case GT_EQ: case GT_NE: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: return LowerCompare(node); case GT_JTRUE: return LowerJTrue(node->AsOp()); case GT_JMP: LowerJmpMethod(node); break; case GT_RETURN: LowerRet(node->AsUnOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_CAST: LowerCast(node); break; #if defined(TARGET_XARCH) || defined(TARGET_ARM64) case GT_BOUNDS_CHECK: ContainCheckBoundsChk(node->AsBoundsChk()); break; #endif // TARGET_XARCH case GT_ARR_ELEM: return LowerArrElem(node); case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_ROL: case GT_ROR: LowerRotate(node); break; #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: ContainCheckShiftRotate(node->AsOp()); break; #endif // !TARGET_64BIT case GT_LSH: case GT_RSH: case GT_RSZ: #if defined(TARGET_XARCH) || defined(TARGET_ARM64) LowerShift(node->AsOp()); #else 
ContainCheckShiftRotate(node->AsOp()); #endif break; case GT_STORE_BLK: case GT_STORE_OBJ: if (node->AsBlk()->Data()->IsCall()) { LowerStoreSingleRegCallStruct(node->AsBlk()); break; } FALLTHROUGH; case GT_STORE_DYN_BLK: LowerBlockStoreCommon(node->AsBlk()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: LowerSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: LowerHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS case GT_LCL_FLD: { // We should only encounter this for lclVars that are lvDoNotEnregister. verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum()); break; } case GT_LCL_VAR: { GenTreeLclVar* lclNode = node->AsLclVar(); WidenSIMD12IfNecessary(lclNode); LclVarDsc* varDsc = comp->lvaGetDesc(lclNode); // The consumer of this node must check compatibility of the fields. // This merely checks whether it is possible for this to be a multireg node. if (lclNode->IsMultiRegLclVar()) { if (!varDsc->lvPromoted || (comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) || (varDsc->lvFieldCnt > MAX_MULTIREG_COUNT)) { lclNode->ClearMultiReg(); if (lclNode->TypeIs(TYP_STRUCT)) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } } break; } case GT_STORE_LCL_VAR: WidenSIMD12IfNecessary(node->AsLclVarCommon()); FALLTHROUGH; case GT_STORE_LCL_FLD: LowerStoreLocCommon(node->AsLclVarCommon()); break; #if defined(TARGET_ARM64) case GT_CMPXCHG: CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand); break; case GT_XORR: case GT_XAND: case GT_XADD: CheckImmedAndMakeContained(node, node->AsOp()->gtOp2); break; #elif defined(TARGET_XARCH) case GT_XORR: case GT_XAND: case GT_XADD: if (node->IsUnusedValue()) { node->ClearUnusedValue(); // Make sure the types are identical, since the node type is changed to VOID // CodeGen relies on op2's type to determine the instruction size. // Note that the node type cannot be a small int but the data operand can. assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet()); node->SetOper(GT_LOCKADD); node->gtType = TYP_VOID; CheckImmedAndMakeContained(node, node->gtGetOp2()); } break; #endif #ifndef TARGET_ARMARCH // TODO-ARMARCH-CQ: We should contain this as long as the offset fits. case GT_OBJ: if (node->AsObj()->Addr()->OperIsLocalAddr()) { node->AsObj()->Addr()->SetContained(); } break; #endif // !TARGET_ARMARCH case GT_KEEPALIVE: node->gtGetOp1()->SetRegOptional(); break; case GT_LCL_FLD_ADDR: case GT_LCL_VAR_ADDR: { const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = comp->lvaGetDesc(lclAddr); if (!varDsc->lvDoNotEnregister) { // TODO-Cleanup: this is definitely not the best place for this detection, // but for now it is the easiest. Move it to morph. comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode)); } } break; default: break; } return node->gtNext; } /** -- Switch Lowering -- * The main idea of switch lowering is to keep transparency of the register requirements of this node * downstream in LSRA. 
 * Given that the switch instruction is inherently a control statement which in the JIT
 * is represented as a simple tree node, at the time we actually generate code for it we end up
 * generating instructions that actually modify the flow of execution, which imposes complicated
 * register requirements and lifetimes.
 *
 * So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
 * means and, more importantly, which registers we need and when, for each instruction we want to issue,
 * to correctly allocate them downstream.
 *
 * For this purpose, this procedure performs switch lowering in two different ways:
 *
 * a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
 *    of the switch, we will store this destination in an array of addresses and the code generator will issue
 *    a data section where this array will live and will emit code that based on the switch index, will indirect and
 *    jump to the destination specified in the jump table.
 *
 *    For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
 *    node for jump table based switches.
 *    The overall structure of a GT_SWITCH_TABLE is:
 *
 *    GT_SWITCH_TABLE
 *           |_________ localVar   (a temporary local that holds the switch index)
 *           |_________ jumpTable  (this is a special node that holds the address of the jump table array)
 *
 *     Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
 *
 *    Input:     GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
 *                    |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
 *
 *    This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
 *    the default case of the switch in case the conditional is evaluated to true).
 *
 *     ----- original block, transformed
 *     GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
 *        |_____ expr      (the index expression)
 *
 *     GT_JTRUE
 *        |_____ GT_COND
 *                 |_____ GT_GE
 *                           |___ Int_Constant  (This constant is the index of the default case
 *                                               that happens to be the highest index in the jump table).
 *                           |___ tempLocal     (The local variable where we stored the index expression).
 *
 *     ----- new basic block
 *     GT_SWITCH_TABLE
 *        |_____ tempLocal
 *        |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
 *                          and LinearCodeGen will be responsible to generate downstream).
 *
 *     This way there are no implicit temporaries.
 *
 * b) For small-sized switches, we will actually morph them into a series of conditionals of the form
 *     if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
 *     (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
 *     else if (case == firstCase){ goto jumpTable[1]; }
 *     else if (case == secondCase) { goto jumpTable[2]; } and so on.
 *
 *     This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer
 *     require internal temporaries to maintain the index we're evaluating plus we're using existing code from
 *     LinearCodeGen to implement this instead of implementing all the control flow constructs using InstrDscs and
 *     InstrGroups downstream.
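 *
 * As a purely illustrative example of form (b) (the names and case values here are
 * hypothetical), a switch with three cases plus a default:
 *
 *     switch (i) { case 0: ...; case 1: ...; case 2: ...; default: ...; }
 *
 * ends up shaped roughly like:
 *
 *     temp = i;                               // the spilled index expression
 *     if ((unsigned)temp > 2) goto default;   // unsigned compare also catches temp < 0
 *     if (temp == 0) goto case0;
 *     if (temp == 1) goto case1;
 *     goto case2;                             // last case needs no compare
 *
 * with each compare/branch pair living in its own BBJ_COND block, which is
 * exactly the expansion the code below performs.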
*/ GenTree* Lowering::LowerSwitch(GenTree* node) { unsigned jumpCnt; unsigned targetCnt; BasicBlock** jumpTab; assert(node->gtOper == GT_SWITCH); // The first step is to build the default case conditional construct that is // shared between both kinds of expansion of the switch node. // To avoid confusion, we'll alias m_block to originalSwitchBB // that represents the node we're morphing. BasicBlock* originalSwitchBB = m_block; LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB); // jumpCnt is the number of elements in the jump table array. // jumpTab is the actual pointer to the jump table array. // targetCnt is the number of unique targets in the jump table array. jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount; jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab; targetCnt = originalSwitchBB->NumSucc(comp); // GT_SWITCH must be a top-level node with no use. #ifdef DEBUG { LIR::Use use; assert(!switchBBRange.TryGetUse(node, &use)); } #endif JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt); // Handle a degenerate case: if the switch has only a default case, just convert it // to an unconditional branch. This should only happen in minopts or with debuggable // code. if (targetCnt == 1) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { originalSwitchBB->bbJumpKind = BBJ_NONE; originalSwitchBB->bbJumpDest = nullptr; } else { originalSwitchBB->bbJumpKind = BBJ_ALWAYS; originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. for (unsigned i = 1; i < jumpCnt; ++i) { (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB); } // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign // the result of the child subtree to a temp. GenTree* rhs = node->AsOp()->gtOp1; unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable")); comp->lvaTable[lclNum].lvType = rhs->TypeGet(); GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs); switchBBRange.InsertAfter(node, store); switchBBRange.Remove(node); return store; } noway_assert(jumpCnt >= 2); // Spill the argument to the switch node into a local so that it can be used later. LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node); ReplaceWithLclVar(use); // GT_SWITCH(indexExpression) is now two statements: // 1. a statement containing 'asg' (for temp = indexExpression) // 2. and a statement with GT_SWITCH(temp) assert(node->gtOper == GT_SWITCH); GenTree* temp = node->AsOp()->gtOp1; assert(temp->gtOper == GT_LCL_VAR); unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum(); var_types tempLclType = temp->TypeGet(); BasicBlock* defaultBB = jumpTab[jumpCnt - 1]; BasicBlock* followingBB = originalSwitchBB->bbNext; /* Is the number of cases right for a test and jump switch? */ const bool fFirstCaseFollows = (followingBB == jumpTab[0]); const bool fDefaultFollows = (followingBB == defaultBB); unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc // This means really just a single cmp/jcc (aka a simple if/else) if (fFirstCaseFollows || fDefaultFollows) { minSwitchTabJumpCnt++; } #if defined(TARGET_ARM) // On ARM for small switch tables we will // generate a sequence of compare and branch instructions // because the code to load the base of the switch // table is huge and hideous due to the relocation... 
:(
    minSwitchTabJumpCnt += 2;
#endif // TARGET_ARM

    // Once we have the temporary variable, we construct the conditional branch for
    // the default case. As stated above, this conditional is being shared between
    // both GT_SWITCH lowering code paths.
    // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
    GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
                                                     comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));

    // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
    // is now less than zero (that would also hit the default case).
    gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;

    GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
    gtDefaultCaseJump->gtFlags = node->gtFlags;

    LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
    switchBBRange.InsertAtEnd(std::move(condRange));

    BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());

    // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
    // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
    // representing the fall-through flow from originalSwitchBB.
    assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
    assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
    assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
    assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
    assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.

    // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).

    // Turn originalSwitchBB into a BBJ_COND.
    originalSwitchBB->bbJumpKind = BBJ_COND;
    originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];

    // Fix the pred for the default case: the default block target still has originalSwitchBB
    // as a predecessor, but the fgSplitBlockAfterNode() call above moved all predecessors
    // to point to afterDefaultCondBlock.
    flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
    comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);

    bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;

    if (TargetOS::IsUnix && TargetArchitecture::IsArm32)
    {
        // Force using an inlined jump sequence instead of switch table generation.
        // The switch jump table is generated with incorrect values in the CoreRT case,
        // so any large switch will crash after loading such a value into the PC.
        // I think this is because we use absolute addressing instead of relative.
        // But CoreRT as a rule uses relative addressing when we generate an executable.
        // See also https://github.com/dotnet/runtime/issues/8683
        // Also https://github.com/dotnet/coreclr/pull/13197
        useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
    }

    // If we originally had 2 unique successors, check to see whether there is a unique
    // non-default case, in which case we can eliminate the switch altogether.
    // Note that the single unique successor case is handled above.
    BasicBlock* uniqueSucc = nullptr;
    if (targetCnt == 2)
    {
        uniqueSucc = jumpTab[0];
        noway_assert(jumpCnt >= 2);
        for (unsigned i = 1; i < jumpCnt - 1; i++)
        {
            if (jumpTab[i] != uniqueSucc)
            {
                uniqueSucc = nullptr;
                break;
            }
        }
    }
    if (uniqueSucc != nullptr)
    {
        // If the unique successor immediately follows this block, we have nothing to do -
        // it will simply fall-through after we remove the switch, below.
        // Otherwise, make this a BBJ_ALWAYS.
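        // (At this point the default target jumpTab[jumpCnt - 1] already branches
        // from originalSwitchBB -- we rewired that edge above -- so only the
        // non-default entries still hold stale predecessor links from the switch.)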
        // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
        //   jumpTab[jumpCnt - 1] was the default target, which we handled above,
        //   jumpTab[0] is the first target, and we'll leave that predecessor link.
        // Remove any additional predecessor links to uniqueSucc.
        for (unsigned i = 1; i < jumpCnt - 1; ++i)
        {
            assert(jumpTab[i] == uniqueSucc);
            (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
        }
        if (afterDefaultCondBlock->bbNext == uniqueSucc)
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
            afterDefaultCondBlock->bbJumpDest = nullptr;
        }
        else
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
            afterDefaultCondBlock->bbJumpDest = uniqueSucc;
        }
    }
    // If the number of possible destinations is small enough, we proceed to expand the switch
    // into a series of conditional branches, otherwise we follow the jump table based switch
    // transformation.
    else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
    {
        // Lower the switch into a series of compare and branch IR trees.
        //
        // In this case we will morph the node in the following way:
        // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
        // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
        //    a statement that is responsible for performing a comparison of the table index and conditional
        //    branch if equal.

        JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);

        // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
        // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
        // we'll delete it.
        bool        fUsedAfterDefaultCondBlock = false;
        BasicBlock* currentBlock               = afterDefaultCondBlock;
        LIR::Range* currentBBRange             = &LIR::AsRange(currentBlock);

        // Walk entries 0 to jumpCnt - 1. If a case target follows, ignore it and let it fall through.
        // If no case target follows, the last one doesn't need to be a compare/branch: it can be an
        // unconditional branch.
        bool fAnyTargetFollows = false;
        for (unsigned i = 0; i < jumpCnt - 1; ++i)
        {
            assert(currentBlock != nullptr);

            // Remove the switch from the predecessor list of this case target's block.
            // We'll add the proper new predecessor edge later.
            flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);

            if (jumpTab[i] == followingBB)
            {
                // This case label follows the switch; let it fall through.
                fAnyTargetFollows = true;
                continue;
            }

            // We need a block to put in the new compare and/or branch.
            // If we haven't used the afterDefaultCondBlock yet, then use that.
            if (fUsedAfterDefaultCondBlock)
            {
                BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
                comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
                currentBlock   = newBlock;
                currentBBRange = &LIR::AsRange(currentBlock);
            }
            else
            {
                assert(currentBlock == afterDefaultCondBlock);
                fUsedAfterDefaultCondBlock = true;
            }

            // We're going to have a branch, either a conditional or unconditional,
            // to the target. Set the target.
            currentBlock->bbJumpDest = jumpTab[i];

            // Wire up the predecessor list for the "branch" case.
comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { // We're processing the last one, and there is no fall through from any case // to the following block, so we can use an unconditional branch to the final // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). currentBlock->bbJumpKind = BBJ_ALWAYS; } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. currentBlock->bbJumpKind = BBJ_COND; // Now, build the conditional statement for the current case that is // being evaluated: // GT_JTRUE // |__ GT_COND // |____GT_EQ // |____ (switchIndex) (The temp variable) // |____ (ICon) (The actual case constant) GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(i, tempLclType)); GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); currentBBRange->InsertAtEnd(std::move(caseRange)); } } if (fAnyTargetFollows) { // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. comp->fgAddRefPred(currentBlock->bbNext, currentBlock); } if (!fUsedAfterDefaultCondBlock) { // All the cases were fall-through! We don't need this block. // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->bbJumpKind == BBJ_SWITCH); currentBlock->bbJumpKind = BBJ_NONE; currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } } else { // At this point the default case has already been handled and we need to generate a jump // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both // switch variants need the switch value so create the necessary LclVar node here. GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType); LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock); switchBlockRange.InsertAtEnd(switchValue); // Try generating a bit test based switch first, // if that's not possible a jump table based switch will be generated. if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue)) { JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum); #ifdef TARGET_64BIT if (tempLclType != TYP_I_IMPL) { // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL. // Note that the switch value is unsigned so the cast should be unsigned as well. switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL); switchBlockRange.InsertAtEnd(switchValue); } #endif GenTree* switchTable = comp->gtNewJmpTableNode(); GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable); switchBlockRange.InsertAfter(switchValue, switchTable, switchJump); // this block no longer branches to the default block afterDefaultCondBlock->bbJumpSwt->removeDefault(); } comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock); } GenTree* next = node->gtNext; // Get rid of the GT_SWITCH(temp). 
    switchBBRange.Remove(node->AsOp()->gtOp1);
    switchBBRange.Remove(node);

    return next;
}

//------------------------------------------------------------------------
// TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
//
// Arguments:
//    jumpTable - The jump table
//    jumpCount - The number of blocks in the jump table
//    targetCount - The number of distinct blocks in the jump table
//    bbSwitch - The switch block
//    switchValue - A LclVar node that provides the switch value
//
// Return value:
//    true if the switch has been lowered to a bit test
//
// Notes:
//    If the jump table contains fewer than 32 (64 on 64 bit targets) entries and there
//    are at most 2 distinct jump targets then the jump table can be converted to a word
//    of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
//    other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
//    to the appropriate target:
//        mov eax, 245 ; jump table converted to a "bit table"
//        bt  eax, ebx ; ebx is supposed to contain the switch value
//        jc target1
//      target0:
//        ...
//      target1:
//    Such code is both shorter and faster (in part due to the removal of a memory load)
//    than the traditional jump table based code. And of course, it also avoids the need
//    to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
//
bool Lowering::TryLowerSwitchToBitTest(
    BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
{
#ifndef TARGET_XARCH
    // Other architectures may use this if they substitute GT_BT with equivalent code.
    return false;
#else
    assert(jumpCount >= 2);
    assert(targetCount >= 2);
    assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
    assert(switchValue->OperIs(GT_LCL_VAR));

    //
    // Quick check to see if it's worth going through the jump table. The bit test switch supports
    // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
    // We'll ensure that there are only 2 targets when building the bit table.
    //
    if (targetCount > 3)
    {
        return false;
    }

    //
    // The number of bits in the bit table is the same as the number of jump table entries. But the
    // jump table also includes the default target (at the end) so we need to ignore it. The default
    // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
    //
    const unsigned bitCount = jumpCount - 1;

    if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
    {
        return false;
    }

    //
    // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
    // bbCase1. Simply use the first block in the jump table as bbCase1; later we can invert the bit
    // table and/or swap the blocks if it's beneficial.
    //
    BasicBlock* bbCase0  = nullptr;
    BasicBlock* bbCase1  = jumpTable[0];
    size_t      bitTable = 1;

    for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
    {
        if (jumpTable[bitIndex] == bbCase1)
        {
            bitTable |= (size_t(1) << bitIndex);
        }
        else if (bbCase0 == nullptr)
        {
            bbCase0 = jumpTable[bitIndex];
        }
        else if (jumpTable[bitIndex] != bbCase0)
        {
            // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
            // than 3 because of the check at the start of the function.
            assert(targetCount == 3);
            return false;
        }
    }

    //
    // One of the case blocks has to follow the switch block. This requirement could be avoided
    // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
    // impacts register allocation.
    //
    if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
    {
        return false;
    }

#ifdef TARGET_64BIT
    //
    // See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
    // then inverting the bit table will make them 0 so that the table now fits in 32 bits.
    // Note that this does not change the number of bits in the bit table; it just takes
    // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
    // extends the immediate value to 64 bit.
    //
    if (~bitTable <= UINT32_MAX)
    {
        bitTable = ~bitTable;
        std::swap(bbCase0, bbCase1);
    }
#endif

    //
    // Rewire the blocks as needed and figure out the condition to use for JCC.
    //
    GenCondition bbSwitchCondition;
    bbSwitch->bbJumpKind = BBJ_COND;

    comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
    comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);

    if (bbSwitch->bbNext == bbCase0)
    {
        // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
        bbSwitchCondition    = GenCondition::C;
        bbSwitch->bbJumpDest = bbCase1;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }
    else
    {
        assert(bbSwitch->bbNext == bbCase1);

        // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
        bbSwitchCondition    = GenCondition::NC;
        bbSwitch->bbJumpDest = bbCase0;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }

    //
    // Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
    //
    var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
    GenTree*  bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
    GenTree*  bitTest      = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
    bitTest->gtFlags |= GTF_SET_FLAGS;
    GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
    jcc->gtFlags |= GTF_USE_FLAGS;

    LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);

    return true;
#endif // TARGET_XARCH
}

// NOTE: this method deliberately does not update the call arg table. It must only
// be used by NewPutArg and LowerArg; these functions are responsible for updating
// the call arg table as necessary.
void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
{
    assert(argSlot != nullptr);
    assert(*argSlot != nullptr);
    assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));

    GenTree* arg = *argSlot;

    // Replace the argument with the putarg/copy
    *argSlot                 = putArgOrBitcast;
    putArgOrBitcast->AsOp()->gtOp1 = arg;

    // Insert the putarg/copy into the block
    BlockRange().InsertAfter(arg, putArgOrBitcast);
}

//------------------------------------------------------------------------
// NewPutArg: rewrites the tree to put an arg in a register or on the stack.
//
// Arguments:
//    call - the call whose arg is being rewritten.
//    arg  - the arg being rewritten.
//    info - the fgArgTabEntry information for the argument.
//    type - the type of the argument.
//
// Return Value:
//    The new tree that was created to put the arg in the right place
//    or the incoming arg if the arg tree was not rewritten.
//
// Assumptions:
//    call, arg, and info must be non-null.
//
// Notes:
//    For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
//    this method allocates a single GT_PUTARG_REG for one-eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
//    for two-eightbyte structs.
//
//    For STK passed structs the method generates a GT_PUTARG_STK tree. For System V systems with native struct passing
//    (i.e.
UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers // layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value. // (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.) // GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type) { assert(call != nullptr); assert(arg != nullptr); assert(info != nullptr); GenTree* putArg = nullptr; bool isOnStack = (info->GetRegNum() == REG_STK); #ifdef TARGET_ARMARCH // Mark contained when we pass struct // GT_FIELD_LIST is always marked contained when it is generated if (type == TYP_STRUCT) { arg->SetContained(); if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR)) { MakeSrcContained(arg, arg->AsObj()->Addr()); } } #endif #if FEATURE_ARG_SPLIT // Struct can be split into register(s) and stack on ARM if (compFeatureArgSplit() && info->IsSplit()) { assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST); // TODO: Need to check correctness for FastTailCall if (call->IsFastTailCall()) { #ifdef TARGET_ARM NYI_ARM("lower: struct argument by fast tail call"); #endif // TARGET_ARM } const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE; DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum); const bool putInIncomingArgArea = call->IsFastTailCall(); putArg = new (comp, GT_PUTARG_SPLIT) GenTreePutArgSplit(arg, info->GetByteOffset(), #if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(), #elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK) slotNumber, #elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), #endif info->numRegs, call, putInIncomingArgArea); // If struct argument is morphed to GT_FIELD_LIST node(s), // we can know GC info by type of each GT_FIELD_LIST node. // So we skip setting GC Pointer info. // GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit(); for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++) { argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex); } if (arg->OperGet() == GT_OBJ) { ClassLayout* layout = arg->AsObj()->GetLayout(); // Set type of registers for (unsigned index = 0; index < info->numRegs; index++) { argSplit->m_regType[index] = layout->GetGCPtrType(index); } } else { unsigned regIndex = 0; for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses()) { if (regIndex >= info->numRegs) { break; } var_types regType = use.GetNode()->TypeGet(); // Account for the possibility that float fields may be passed in integer registers. if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex))) { regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG; } argSplit->m_regType[regIndex] = regType; regIndex++; } // Clear the register assignment on the fieldList node, as these are contained. 
            arg->SetRegNum(REG_NA);
        }
    }
    else
#endif // FEATURE_ARG_SPLIT
    {
        if (!isOnStack)
        {
#if FEATURE_MULTIREG_ARGS
            if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
            {
                unsigned int regIndex = 0;
                for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
                {
                    regNumber argReg = info->GetRegNum(regIndex);
                    GenTree*  curOp  = use.GetNode();
                    var_types curTyp = curOp->TypeGet();

                    // Create a new GT_PUTARG_REG node with op1
                    GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);

                    // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper);
                    regIndex++;
                }

                // Just return arg. The GT_FIELD_LIST is not replaced.
                // Nothing more to do.
                return arg;
            }
            else
#endif // FEATURE_MULTIREG_ARGS
            {
                putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum());
            }
        }
        else
        {
            // Mark this one as a tail call arg if it is a fast tail call.
            // This provides the info to put this argument in the incoming arg area slot
            // instead of the outgoing arg area slot.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
            // a result. So the type of its operand must be the correct type to push on the stack.
            // For a FIELD_LIST, this will be the type of the field (not the type of the arg),
            // but otherwise it is generally the type of the operand.
            info->checkIsStruct();
#endif

            if ((arg->OperGet() != GT_FIELD_LIST))
            {
#if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                if (type == TYP_SIMD12)
                {
#if !defined(TARGET_64BIT)
                    assert(info->GetByteSize() == 12);
#else  // TARGET_64BIT
                    if (compMacOsArm64Abi())
                    {
                        assert(info->GetByteSize() == 12);
                    }
                    else
                    {
                        assert(info->GetByteSize() == 16);
                    }
#endif // TARGET_64BIT
                }
                else
#endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                {
                    assert(genActualType(arg->TypeGet()) == type);
                }
            }
            const unsigned slotNumber           = info->GetByteOffset() / TARGET_POINTER_SIZE;
            const bool     putInIncomingArgArea = call->IsFastTailCall();

            putArg = new (comp, GT_PUTARG_STK)
                GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 info->GetStackByteSize(),
#endif
                                 call, putInIncomingArgArea);

#ifdef FEATURE_PUT_STRUCT_ARG_STK
            // If the ArgTabEntry indicates that this arg is a struct
            // get and store the number of slots that are references.
            // This is later used in the codegen for PUT_ARG_STK implementation
            // for struct to decide whether and how many single eight-byte copies
            // to be done (only for reference slots), so gcinfo is emitted.
            // For non-reference slots faster/smaller size instructions are used -
            // pair copying using XMM registers or rep mov instructions.
            if (info->isStruct)
            {
                // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
                if (arg->OperIsLocal())
                {
                    // This must have a type with a known size (SIMD or has been morphed to a primitive type).
                    assert(arg->TypeGet() != TYP_STRUCT);
                }
                else if (arg->OperIs(GT_OBJ))
                {
                    assert(!varTypeIsSIMD(arg));

#ifdef TARGET_X86
                    // On x86 the VM lies about the type of a struct containing a pointer sized
                    // integer field by returning the type of its field as the type of the struct.
                    // Such a struct can be passed in a register depending on its position in the
                    // parameter list.
                    // The VM does this unwrapping only one level deep, and therefore
                    // a type like Struct Foo { Struct Bar { int f } } always needs to be
                    // passed on the stack. Also, the VM doesn't lie about the type of such a struct
                    // when it is a field of another struct. That is, the VM doesn't lie about
                    // the type of Foo.Bar.
                    //
                    // We now support the promotion of fields that are of type struct.
                    // However we only support a limited case where the struct field has a
                    // single field and that single field must be a scalar type. Say the Foo.Bar
                    // field is getting passed as a parameter to a call. Since it is a TYP_STRUCT,
                    // as per the x86 ABI it should always be passed on the stack. Therefore the GenTree
                    // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
                    // local v1 could be a promoted field standing for Foo.Bar. Note that
                    // the type of v1 will be the type of the field Foo.Bar.f when Foo is
                    // promoted. That is, v1 will be a scalar type. In this case we need to
                    // pass v1 on the stack instead of in a register.
                    //
                    // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
                    // a scalar type and the width of GT_OBJ matches the type size of v1.
                    // Note that this cannot be done until call node arguments are morphed
                    // because we should not lose the fact that the type of the argument is
                    // a struct so that the arg gets correctly marked to be passed on the stack.
                    GenTree* objOp1 = arg->gtGetOp1();
                    if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
                    {
                        unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
                        if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
                        {
                            comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
                        }
                    }
#endif // TARGET_X86
                }
                else if (!arg->OperIs(GT_FIELD_LIST))
                {
#ifdef TARGET_ARM
                    assert((info->GetStackSlotsNumber() == 1) ||
                           ((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2)));
#else
                    assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1));
#endif
                }
            }
#endif // FEATURE_PUT_STRUCT_ARG_STK
        }
    }

    JITDUMP("new node is : ");
    DISPNODE(putArg);
    JITDUMP("\n");

    if (arg->gtFlags & GTF_LATE_ARG)
    {
        putArg->gtFlags |= GTF_LATE_ARG;
    }
    return putArg;
}

//------------------------------------------------------------------------
// LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
// the argument evaluation and the call. This is the point at which the source is
// consumed and the value transitions from control of the register allocator to the calling
// convention.
//
// Arguments:
//    call  - The call node
//    ppArg - Pointer to the call argument pointer. We might replace the call argument by
//            changing *ppArg.
//
// Return Value:
//    None.
//
void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
{
    GenTree* arg = *ppArg;

    JITDUMP("lowering arg : ");
    DISPNODE(arg);

    // No assignments should remain by Lowering.
    assert(!arg->OperIs(GT_ASG));
    assert(!arg->OperIsPutArgStk());

    // Assignments/stores at this level are not really placing an argument.
    // They are setting up temporary locals that will later be placed into
    // outgoing regs or stack.
    // Note that atomic ops may be stores and still produce a value.
if (!arg->IsValue()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg); assert(info->GetNode() == arg); var_types type = arg->TypeGet(); if (varTypeIsSmall(type)) { // Normalize 'type', it represents the item that we will be storing in the Outgoing Args type = TYP_INT; } #if defined(FEATURE_SIMD) #if defined(TARGET_X86) // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their // allocated size (see lvSize()). However, when passing the variables as arguments, and // storing the variables to the outgoing argument area on the stack, we must use their // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written. if (type == TYP_SIMD16) { if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR)) { const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon()); type = varDsc->lvType; } else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC)) { GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg); // For HWIntrinsic, there are some intrinsics like ExtractVector128 which have // a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in // the assert below. assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) || (jitIntrinsic->GetSimdSize() == 32)); if (jitIntrinsic->GetSimdSize() == 12) { type = TYP_SIMD12; } } } #elif defined(TARGET_AMD64) // TYP_SIMD8 parameters that are passed as longs if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum())) { GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg); BlockRange().InsertAfter(arg, bitcast); *ppArg = arg = bitcast; assert(info->GetNode() == arg); type = TYP_LONG; } #endif // defined(TARGET_X86) #endif // defined(FEATURE_SIMD) // If we hit this we are probably double-lowering. assert(!arg->OperIsPutArg()); #if !defined(TARGET_64BIT) if (varTypeIsLong(type)) { noway_assert(arg->OperIs(GT_LONG)); GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT); GenTree* newArg = NewPutArg(call, fieldList, info, type); if (info->GetRegNum() != REG_STK) { assert(info->numRegs == 2); // In the register argument case, NewPutArg replaces the original field list args with new // GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the // only thing left to do is to insert the field list itself in linear order. assert(newArg == fieldList); BlockRange().InsertBefore(arg, newArg); } else { // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK. // Although the hi argument needs to be pushed first, that will be handled by the general case, // in which the fields will be reversed. assert(info->numSlots == 2); newArg->SetRegNum(REG_STK); BlockRange().InsertBefore(arg, fieldList, newArg); } *ppArg = newArg; assert(info->GetNode() == newArg); BlockRange().Remove(arg); } else #endif // !defined(TARGET_64BIT) { #ifdef TARGET_ARMARCH if (call->IsVarargs() || comp->opts.compUseSoftFP) { // For vararg call or on armel, reg args should be all integer. // Insert copies as needed to move float value to integer register. 
GenTree* newNode = LowerFloatArg(ppArg, info);
            if (newNode != nullptr)
            {
                type = newNode->TypeGet();
            }
        }
#endif // TARGET_ARMARCH

        GenTree* putArg = NewPutArg(call, arg, info, type);

        // In the case of a register-passable struct (in one or two registers)
        // the NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs.)
        // If an extra node is returned, splice it in the right place in the tree.
        if (arg != putArg)
        {
            ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
        }
    }
}

#ifdef TARGET_ARMARCH
//------------------------------------------------------------------------
// LowerFloatArg: Lower float call arguments on the arm platform.
//
// Arguments:
//    arg  - The arg node
//    info - call argument info
//
// Return Value:
//    Return nullptr, if no transformation was done;
//    return arg if there was an in-place transformation;
//    return a new tree if the root was changed.
//
// Notes:
//    This must handle scalar float arguments as well as GT_FIELD_LISTs
//    with floating point fields.
//
GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
{
    GenTree* arg = *pArg;
    if (info->GetRegNum() != REG_STK)
    {
        if (arg->OperIs(GT_FIELD_LIST))
        {
            // Transform fields that are passed in registers in place.
            regNumber currRegNumber = info->GetRegNum();
            unsigned  regIndex      = 0;
            for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
            {
                if (regIndex >= info->numRegs)
                {
                    break;
                }
                GenTree* node = use.GetNode();
                if (varTypeIsFloating(node))
                {
                    GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
                    assert(intNode != nullptr);

                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode);
                }

                if (node->TypeGet() == TYP_DOUBLE)
                {
                    currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
                    regIndex += 2;
                }
                else
                {
                    currRegNumber = REG_NEXT(currRegNumber);
                    regIndex += 1;
                }
            }
            // List fields were replaced in place.
            return arg;
        }
        else if (varTypeIsFloating(arg))
        {
            GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum());
            assert(intNode != nullptr);
            ReplaceArgWithPutArgOrBitcast(pArg, intNode);
            return *pArg;
        }
    }
    return nullptr;
}

//------------------------------------------------------------------------
// LowerFloatArgReg: Lower the float call argument node that is passed via register.
//
// Arguments:
//    arg    - The arg node
//    regNum - register number
//
// Return Value:
//    Return a new bitcast node that moves the float to an int register.
//
GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
{
    var_types floatType = arg->TypeGet();
    assert(varTypeIsFloating(floatType));
    var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
    GenTree*  intArg  = comp->gtNewBitCastNode(intType, arg);
    intArg->SetRegNum(regNum);
#ifdef TARGET_ARM
    if (floatType == TYP_DOUBLE)
    {
        // A special case when we introduce TYP_LONG
        // during lowering for arm32 softFP to pass double
        // in int registers.
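        // E.g. a double headed for r2 gets a TYP_LONG bitcast with
        // gtOtherReg set to r3 so that codegen moves both 32-bit halves.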
assert(comp->opts.compUseSoftFP);

        regNumber nextReg                  = REG_NEXT(regNum);
        intArg->AsMultiRegOp()->gtOtherReg = nextReg;
    }
#endif
    return intArg;
}
#endif

// do lowering steps for each arg of a call
void Lowering::LowerArgsForCall(GenTreeCall* call)
{
    JITDUMP("objp:\n======\n");
    if (call->gtCallThisArg != nullptr)
    {
        LowerArg(call, &call->gtCallThisArg->NodeRef());
    }

    JITDUMP("\nargs:\n======\n");
    for (GenTreeCall::Use& use : call->Args())
    {
        LowerArg(call, &use.NodeRef());
    }

    JITDUMP("\nlate:\n======\n");
    for (GenTreeCall::Use& use : call->LateArgs())
    {
        LowerArg(call, &use.NodeRef());
    }
}

// helper that creates a node representing a relocatable physical address computation
GenTree* Lowering::AddrGen(ssize_t addr)
{
    // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
    GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
    return result;
}

// variant that takes a void*
GenTree* Lowering::AddrGen(void* addr)
{
    return AddrGen((ssize_t)addr);
}

// do lowering steps for a call
// this includes:
//   - adding the placement nodes (either stack or register variety) for arguments
//   - lowering the expression that calculates the target address
//   - adding nodes for other operations that occur after the call sequence starts and before
//     control transfer occurs (profiling and tail call helpers, pinvoke incantations)
//
void Lowering::LowerCall(GenTree* node)
{
    GenTreeCall* call = node->AsCall();

    JITDUMP("lowering call (before):\n");
    DISPTREERANGE(BlockRange(), call);
    JITDUMP("\n");

    call->ClearOtherRegs();
    LowerArgsForCall(call);

    // note that everything generated from this point might run AFTER the outgoing args are placed
    GenTree* controlExpr          = nullptr;
    bool     callWasExpandedEarly = false;

    // for x86, this is where we record ESP for checking later to make sure stack is balanced

    // Check for Delegate.Invoke(). If so, we inline it. We get the
    // target-object and target-function from the delegate-object, and do
    // an indirect call.
    if (call->IsDelegateInvoke())
    {
        controlExpr = LowerDelegateInvoke(call);
    }
    else
    {
        //  Virtual and interface calls
        switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
        {
            case GTF_CALL_VIRT_STUB:
                controlExpr = LowerVirtualStubCall(call);
                break;

            case GTF_CALL_VIRT_VTABLE:
                assert(call->IsVirtualVtable());
                if (!call->IsExpandedEarly())
                {
                    assert(call->gtControlExpr == nullptr);
                    controlExpr = LowerVirtualVtableCall(call);
                }
                else
                {
                    callWasExpandedEarly = true;
                    controlExpr          = call->gtControlExpr;
                }
                break;

            case GTF_CALL_NONVIRT:
                if (call->IsUnmanaged())
                {
                    controlExpr = LowerNonvirtPinvokeCall(call);
                }
                else if (call->gtCallType == CT_INDIRECT)
                {
                    controlExpr = LowerIndirectNonvirtCall(call);
                }
                else
                {
                    controlExpr = LowerDirectCall(call);
                }
                break;

            default:
                noway_assert(!"strange call type");
                break;
        }
    }

    // Indirect calls should always go through GenTreeCall::gtCallAddr and
    // should never have a control expression as well.
    assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr));

    if (call->IsTailCallViaJitHelper())
    {
        // Either controlExpr or gtCallAddr must contain real call target.
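        // (For CT_INDIRECT calls the target lives in gtCallAddr and
        // controlExpr is null, per the assert above; for other call kinds
        // the lowering above produced a controlExpr that computes the target.)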
if (controlExpr == nullptr) { assert(call->gtCallType == CT_INDIRECT); assert(call->gtCallAddr != nullptr); controlExpr = call->gtCallAddr; } controlExpr = LowerTailCallViaJitHelper(call, controlExpr); } // Check if we need to thread a newly created controlExpr into the LIR // if ((controlExpr != nullptr) && !callWasExpandedEarly) { LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr); JITDUMP("results of lowering call:\n"); DISPRANGE(controlExprRange); ContainCheckRange(controlExprRange); BlockRange().InsertBefore(call, std::move(controlExprRange)); call->gtControlExpr = controlExpr; } if (comp->opts.IsCFGEnabled()) { LowerCFGCall(call); } if (call->IsFastTailCall()) { // Lower fast tail call can introduce new temps to set up args correctly for Callee. // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding Caller stack args // and replacing them with a new temp. Control expr also can contain nodes that need // to be patched. // Therefore lower fast tail call must be done after controlExpr is inserted into LIR. // There is one side effect which is flipping the order of PME and control expression // since LowerFastTailCall calls InsertPInvokeMethodEpilog. LowerFastTailCall(call); } if (varTypeIsStruct(call)) { LowerCallStruct(call); } ContainCheckCallOperands(call); JITDUMP("lowering call (after):\n"); DISPTREERANGE(BlockRange(), call); JITDUMP("\n"); } // Inserts profiler hook, GT_PROF_HOOK for a tail call node. // // AMD64: // We need to insert this after all nested calls, but before all the arguments to this call have been set up. // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before // that. If there are no args, then it should be inserted before the call node. // // For example: // * stmtExpr void (top level) (IL 0x000...0x010) // arg0 SETUP | /--* argPlace ref REG NA $c5 // this in rcx | | /--* argPlace ref REG NA $c1 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2 // arg1 in rdx | | +--* putarg_reg ref REG NA // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80 // this in rcx | | +--* putarg_reg ref REG NA // | | /--* call nullcheck ref System.String.ToLower $c5 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? ) // | | { \--* prof_hook void REG NA // arg0 in rcx | +--* putarg_reg ref REG NA // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void // // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call. // // X86: // Insert the profiler hook immediately before the call. The profiler hook will preserve // all argument registers (ECX, EDX), but nothing else. // // Params: // callNode - tail call node // insertionPoint - if non-null, insert the profiler hook before this point. // If null, insert the profiler hook before args are setup // but after all arg side effects are computed. 
//
void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
{
    assert(call->IsTailCall());
    assert(comp->compIsProfilerHookNeeded());

#if defined(TARGET_X86)

    if (insertionPoint == nullptr)
    {
        insertionPoint = call;
    }

#else // !defined(TARGET_X86)

    if (insertionPoint == nullptr)
    {
        for (GenTreeCall::Use& use : call->Args())
        {
            assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs
            if (use.GetNode()->OperIs(GT_PUTARG_STK))
            {
                // found it
                insertionPoint = use.GetNode();
                break;
            }
        }

        if (insertionPoint == nullptr)
        {
            for (GenTreeCall::Use& use : call->LateArgs())
            {
                if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK))
                {
                    // found it
                    insertionPoint = use.GetNode();
                    break;
                }
            }

            // If there are no args, insert before the call node
            if (insertionPoint == nullptr)
            {
                insertionPoint = call;
            }
        }
    }

#endif // !defined(TARGET_X86)

    assert(insertionPoint != nullptr);
    GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
    BlockRange().InsertBefore(insertionPoint, profHookNode);
}

//------------------------------------------------------------------------
// LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog +
// jmp).
//
// Arguments:
//    call - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
//    call must be non-null.
//
// Notes:
//     For fast tail calls it is necessary to set up stack args in the incoming
//     arg stack space area. When args passed also come from this area we may
//     run into problems because we may end up overwriting the stack slot before
//     using it. For example, for foo(a, b) { return bar(b, a); }, if a and b
//     are on incoming arg stack space in foo they need to be swapped in this
//     area for the call to bar. This function detects this situation and
//     introduces a temp when an outgoing argument would overwrite a later-used
//     incoming argument.
//
//     This function also handles inserting necessary profiler hooks and PInvoke
//     method epilogs in case there are inlined PInvokes.
void Lowering::LowerFastTailCall(GenTreeCall* call)
{
#if FEATURE_FASTTAILCALL
    // Tail call restrictions i.e. conditions under which tail prefix is ignored.
    // Most of these checks are already done by importer or fgMorphTailCall().
    // This serves as a double sanity check.
    assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
    assert(!comp->opts.IsReversePInvoke());                  // tail calls reverse pinvoke
    assert(!call->IsUnmanaged());                            // tail calls to unmanaged methods
    assert(!comp->compLocallocUsed);                         // tail call from methods that also do localloc

#ifdef TARGET_AMD64
    assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
#endif // TARGET_AMD64

    // We expect to see a call that meets the following conditions
    assert(call->IsFastTailCall());

    // VM cannot use return address hijacking when A() and B() tail call each
    // other in mutual recursion. Therefore, this block is reachable through
    // a GC-safe point or the whole method is marked as fully interruptible.
    //
    // TODO-Cleanup:
    // optReachWithoutCall() depends on the fact that loop header blocks
    // will have a block number > fgLastBB. These loop headers get added
    // after dominator computation and get skipped by OptReachWithoutCall().
    // The below condition cannot be asserted in lower because fgSimpleLowering()
    // can add a new basic block for range check failure which becomes
    // fgLastBB with block number > loop header block number.
    // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
    //        !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible());

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns. This is a case of caller method has both PInvokes and tail calls.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
    }

    // Args for tail call are setup in incoming arg area. The gc-ness of args of
    // caller and callee (which is being tail called) may not match. Therefore, everything
    // from arg setup until the epilog needs to be non-interruptible by GC. This is
    // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
    // of call is setup. Note that once a stack arg is setup, it cannot have nested
    // calls subsequently in execution order to setup other args, because the nested
    // call could overwrite the stack arg that is setup earlier.
    ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));

    for (GenTreeCall::Use& use : call->Args())
    {
        if (use.GetNode()->OperIs(GT_PUTARG_STK))
        {
            putargs.Push(use.GetNode());
        }
    }

    for (GenTreeCall::Use& use : call->LateArgs())
    {
        if (use.GetNode()->OperIs(GT_PUTARG_STK))
        {
            putargs.Push(use.GetNode());
        }
    }

    GenTree* startNonGCNode = nullptr;
    if (!putargs.Empty())
    {
        // Get the earliest operand of the first PUTARG_STK node. We will make
        // the required copies of args before this node.
        bool     unused;
        GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode();
        // Insert GT_START_NONGC node before we evaluate the PUTARG_STK args.
        // Note that if there are no args to be setup on stack, no need to
        // insert GT_START_NONGC node.
        startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
        BlockRange().InsertBefore(insertionPoint, startNonGCNode);

        // GC interruptibility in the following case:
        //     foo(a, b, c, d, e) { bar(a, b, c, d, e); }
        //     bar(a, b, c, d, e) { foo(a, b, c, d, e); }
        //
        // Since the instruction group starting from the instruction that sets up first
        // stack arg to the end of the tail call is marked as non-GC interruptible,
        // this will form a non-interruptible tight loop causing GC starvation. To fix
        // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
        // has a single basic block and is not a GC-safe point. The presence of a single
        // nop outside the non-GC interruptible region will prevent GC starvation.
        if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
        {
            assert(comp->fgFirstBB == comp->compCurBB);
            GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
            BlockRange().InsertBefore(startNonGCNode, noOp);
        }

        // Since this is a fast tailcall each PUTARG_STK will place the argument in the
        // _incoming_ arg space area. This will effectively overwrite our already existing
        // incoming args that live in that area. If we have later uses of those args, this
        // is a problem. We introduce a defensive copy into a temp here of those args that
        // potentially may cause problems.
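        // Illustrative example of the overlap check below: for foo(int a, int b)
        // tail calling bar(b, a), the PUTARG_STK that writes 'b' into the slot at
        // offset 0 overlaps caller arg 'a'; if 'a' is still read after that point,
        // RehomeArgForFastTailCall first copies it to a temp.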
for (int i = 0; i < putargs.Height(); i++) { GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk(); unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize(); int baseOff = -1; // Stack offset of first arg on stack for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++) { LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum); if (callerArgDsc->lvIsRegArg) { continue; } unsigned int argStart; unsigned int argEnd; #if defined(TARGET_AMD64) if (TargetOS::IsWindows) { // On Windows x64, the argument position determines the stack slot uniquely, and even the // register args take up space in the stack frame (shadow space). argStart = callerArgLclNum * TARGET_POINTER_SIZE; argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize()); } else #endif // TARGET_AMD64 { assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); if (baseOff == -1) { baseOff = callerArgDsc->GetStackOffset(); } // On all ABIs where we fast tail call the stack args should come in order. assert(baseOff <= callerArgDsc->GetStackOffset()); // Compute offset of this stack argument relative to the first stack arg. // This will be its offset into the incoming arg space area. argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff); argEnd = argStart + comp->lvaLclSize(callerArgLclNum); } // If ranges do not overlap then this PUTARG_STK will not mess up the arg. if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd)) { continue; } // Codegen cannot handle a partially overlapping copy. For // example, if we have // bar(S16 stack, S32 stack2) // foo(S32 stack, S32 stack2) { bar(..., stack) } // then we may end up having to move 'stack' in foo 16 bytes // ahead. It is possible that this PUTARG_STK is the only use, // in which case we will need to introduce a temp, so look for // uses starting from it. Note that we assume that in-place // copies are OK. GenTree* lookForUsesFrom = put->gtNext; if (overwrittenStart != argStart) { lookForUsesFrom = insertionPoint; } RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call); // The above call can introduce temps and invalidate the pointer. callerArgDsc = comp->lvaGetDesc(callerArgLclNum); // For promoted locals we have more work to do as its fields could also have been invalidated. if (!callerArgDsc->lvPromoted) { continue; } unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart; unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt; for (unsigned int j = fieldsFirst; j < fieldsEnd; j++) { RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call); } } } } // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be // inserted before the args are setup but after the side effects of args are // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC // node if one exists. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, startNonGCNode); } #else // !FEATURE_FASTTAILCALL // Platform does not implement fast tail call mechanism. This cannot be // reached because we always choose to do a tailcall via helper on those // platforms (or no tailcall at all). unreached(); #endif } // //------------------------------------------------------------------------ // RehomeArgForFastTailCall: Introduce temps for args that may be overwritten // during fast tailcall sequence. 
// // Arguments: // lclNum - the lcl num of the arg that will be overwritten. // insertTempBefore - the node at which to copy the arg into a temp. // lookForUsesStart - the node where to start scanning and replacing uses of // the arg specified by lclNum. // callNode - the call node that is being dispatched as a fast tailcall. // // Assumptions: // all args must be non-null. // // Notes: // This function scans for uses of the arg specified by lclNum starting // from the lookForUsesStart node. If it finds any uses it introduces a temp // for this argument and updates uses to use this instead. In the situation // where it introduces a temp it can thus invalidate pointers to other // locals. // void Lowering::RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode) { unsigned int tmpLclNum = BAD_VAR_NUM; for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext) { if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr()) { continue; } GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon(); if (lcl->GetLclNum() != lclNum) { continue; } // Create tmp and use it in place of callerArgDsc if (tmpLclNum == BAD_VAR_NUM) { tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable")); LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum); var_types tmpTyp = genActualType(callerArgDsc->TypeGet()); comp->lvaTable[tmpLclNum].lvType = tmpTyp; // TODO-CQ: I don't see why we should copy doNotEnreg. comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister; #ifdef DEBUG comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason()); #endif // DEBUG GenTree* value = comp->gtNewLclvNode(lclNum, tmpTyp); if (tmpTyp == TYP_STRUCT) { comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false); } GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value); BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar)); ContainCheckRange(value, storeLclVar); LowerNode(storeLclVar); } lcl->SetLclNum(tmpLclNum); } } //------------------------------------------------------------------------ // LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph // has already inserted tailcall helper special arguments. This function inserts // actual data for some placeholders. This function is only used on x86. // // Lower // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg) // as // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention. // // Also inserts PInvoke method epilog if required. // // Arguments: // call - The call node // callTarget - The real call target. This is used to replace the dummyArg during lowering. // // Return Value: // Returns control expression tree for making a call to helper Jit_TailCall. // GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget) { // Tail call restrictions i.e. conditions under which tail prefix is ignored. // Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. 
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
    assert(!call->IsUnmanaged());                            // tail calls to unmanaged methods
    assert(!comp->compLocallocUsed);                         // tail call from methods that also do localloc

    // We expect to see a call that meets the following conditions
    assert(call->IsTailCallViaJitHelper());
    assert(callTarget != nullptr);

    // The TailCall helper call never returns to the caller and is not GC interruptible.
    // Therefore the block containing the tail call should be a GC safe point to avoid
    // GC starvation. It is legal for the block to be unmarked iff the entry block is a
    // GC safe point, as the entry block trivially dominates every reachable block.
    assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns. This is a case of caller method has both PInvokes and tail calls.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
    }

    // Remove gtCallAddr from execution order if present.
    if (call->gtCallType == CT_INDIRECT)
    {
        assert(call->gtCallAddr != nullptr);

        bool               isClosed;
        LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
        assert(isClosed);

        BlockRange().Remove(std::move(callAddrRange));
    }

    // The callTarget tree needs to be sequenced.
    LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);

    // Verify the special args are what we expect, and replace the dummy args with real values.
    // We need to figure out the size of the outgoing stack arguments, not including the special args.
    // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
    // This number is exactly the next slot number in the call's argument info struct.
    unsigned  nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset();
    const int wordSize         = 4;
    unsigned  nNewStkArgsWords = nNewStkArgsBytes / wordSize;
    DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords);
    assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
    nNewStkArgsWords -= 4;

    unsigned numArgs = call->fgArgInfo->ArgCount();

    fgArgTabEntry* argEntry;

    // arg 0 == callTarget.
    argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
    assert(argEntry != nullptr);
    GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();

    ContainCheckRange(callTargetRange);
    BlockRange().InsertAfter(arg0, std::move(callTargetRange));

    bool               isClosed;
    LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
    assert(isClosed);

    BlockRange().Remove(std::move(secondArgRange));

    argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget;

    // arg 1 == flags
    argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
    assert(argEntry != nullptr);
    GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
    assert(arg1->gtOper == GT_CNS_INT);

    ssize_t tailCallHelperFlags = 1 |                                  // always restore EDI,ESI,EBX
                                  (call->IsVirtualStub() ?
0x2 : 0x0); // Stub dispatch flag arg1->AsIntCon()->gtIconVal = tailCallHelperFlags; // arg 2 == numberOfNewStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3); assert(argEntry != nullptr); GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg2->gtOper == GT_CNS_INT); arg2->AsIntCon()->gtIconVal = nNewStkArgsWords; #ifdef DEBUG // arg 3 == numberOfOldStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4); assert(argEntry != nullptr); GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg3->gtOper == GT_CNS_INT); #endif // DEBUG // Transform this call node into a call to Jit tail call helper. call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; // Lower this as if it were a pure helper call. call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER); GenTree* result = LowerDirectCall(call); // Now add back tail call flags for identifying this node as tail call dispatched via helper. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; #ifdef PROFILING_SUPPORTED // Insert profiler tail call hook if needed. // Since we don't know the insertion point, pass null for second param. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, nullptr); } #endif // PROFILING_SUPPORTED return result; } //------------------------------------------------------------------------ // LowerCFGCall: Potentially lower a call to use control-flow guard. This // expands indirect calls into either a validate+call sequence or to a dispatch // helper taking the original target in a special register. // // Arguments: // call - The call node // void Lowering::LowerCFGCall(GenTreeCall* call) { assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL)); if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { return; } GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr; if ((callTarget == nullptr) || callTarget->IsIntegralConst()) { // This is a direct call, no CFG check is necessary. return; } CFGCallKind cfgKind = call->GetCFGCallKind(); switch (cfgKind) { case CFGCallKind::ValidateAndCall: { // To safely apply CFG we need to generate a very specific pattern: // in particular, it is a safety issue to allow the JIT to reload // the call target from memory between calling // CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is // something that would easily occur in debug codegen if we // produced high-level IR. Instead we will use a GT_PHYSREG node // to get the target back from the register that contains the target. // // Additionally, the validator does not preserve all arg registers, // so we have to move all GT_PUTARG_REG nodes that would otherwise // be trashed ahead. The JIT also has an internal invariant that // once GT_PUTARG nodes start to appear in LIR, the call is coming // up. To avoid breaking this invariant we move _all_ GT_PUTARG // nodes (in particular, GC info reporting relies on this). // // To sum up, we end up transforming // // ta... = <early args> // tb... = <late args> // tc = callTarget // GT_CALL tc, ta..., tb... // // into // // ta... = <early args> (without GT_PUTARG_* nodes) // tb = callTarget // GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb // tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper) // td = <moved GT_PUTARG_* nodes> // GT_CALL tb, ta..., td.. 
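            // (The GT_PHYSREG read of REG_VALIDATE_INDIRECT_CALL_ADDR must come
            // directly after the validator call; reloading the target from
            // memory at that point would defeat the CFG check, as noted above.)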
// GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL); LIR::Use useOfTar; bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(regNode); GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet()); // Add the call to the validator. Use a placeholder for the target while we // morph, sequence and lower, to avoid redoing that for the actual target. GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder); GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args); comp->fgMorphTree(validate); LIR::Range validateRange = LIR::SeqTree(comp, validate); GenTree* validateFirst = validateRange.FirstNode(); GenTree* validateLast = validateRange.LastNode(); // Insert the validator with the call target before the late args. BlockRange().InsertBefore(call, std::move(validateRange)); // Swap out the target gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(callTarget); targetPlaceholder->SetUnusedValue(); LowerRange(validateFirst, validateLast); // Insert the PHYSREG node that we must load right after validation. BlockRange().InsertAfter(validate, regNode); LowerNode(regNode); // Finally move all GT_PUTARG_* nodes for (GenTreeCall::Use& use : call->Args()) { GenTree* node = use.GetNode(); if (!node->IsValue()) { // Non-value nodes in early args are setup nodes for late args. continue; } assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } for (GenTreeCall::Use& use : call->LateArgs()) { GenTree* node = use.GetNode(); assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } break; } case CFGCallKind::Dispatch: { #ifdef REG_DISPATCH_INDIRECT_CALL_ADDR // Now insert the call target as an extra argument. // // First append the early placeholder arg GenTreeCall::Use** earlySlot = &call->gtCallArgs; unsigned int index = call->gtCallThisArg != nullptr ? 
1 : 0; while (*earlySlot != nullptr) { earlySlot = &(*earlySlot)->NextRef(); index++; } assert(index == call->fgArgInfo->ArgCount()); GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE); placeHolder->gtFlags |= GTF_LATE_ARG; *earlySlot = comp->gtNewCallArgs(placeHolder); // Append the late actual arg GenTreeCall::Use** lateSlot = &call->gtCallLateArgs; unsigned int lateIndex = 0; while (*lateSlot != nullptr) { lateSlot = &(*lateSlot)->NextRef(); lateIndex++; } *lateSlot = comp->gtNewCallArgs(callTarget); // Add an entry into the arg info regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR; unsigned numRegs = 1; unsigned byteSize = TARGET_POINTER_SIZE; unsigned byteAlignment = TARGET_POINTER_SIZE; bool isStruct = false; bool isFloatHfa = false; bool isVararg = false; fgArgTabEntry* entry = call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); entry->lateUse = *lateSlot; entry->SetLateArgInx(lateIndex); // Lower the newly added args now that call is updated LowerArg(call, &(*earlySlot)->NodeRef()); LowerArg(call, &(*lateSlot)->NodeRef()); // Finally update the call to be a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif // Now relower the call target call->gtControlExpr = LowerDirectCall(call); if (call->gtControlExpr != nullptr) { LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr); ContainCheckRange(dispatchControlExprRange); BlockRange().InsertBefore(call, std::move(dispatchControlExprRange)); } #else assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher"); #endif break; } default: unreached(); } } //------------------------------------------------------------------------ // IsInvariantInRange: Check if a node is invariant in the specified range. In // other words, can 'node' be moved to right before 'endExclusive' without its // computation changing values? // // Arguments: // node - The node. // endExclusive - The exclusive end of the range to check invariance for. // bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive) { assert(node->Precedes(endExclusive)); if (node->IsInvariant()) { return true; } if (!node->IsValue()) { return false; } if (node->OperIsLocal()) { GenTreeLclVarCommon* lcl = node->AsLclVarCommon(); LclVarDsc* desc = comp->lvaGetDesc(lcl); if (desc->IsAddressExposed()) { return false; } // Currently, non-address exposed locals have the property that their // use occurs at the user, so no further interference check is // necessary. return true; } return false; } //------------------------------------------------------------------------ // MoveCFGCallArg: Given a call that will be CFG transformed using the // validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node, // move that node right before the call. // // Arguments: // call - The call that is being CFG transformed // node - The argument node // // Remarks: // We can always move the GT_PUTARG_* node further ahead as the side-effects // of these nodes are handled by LSRA. 
However, the operands of these nodes // are not always safe to move further ahead; for invariant operands, we // move them ahead as well to shorten the lifetime of these values. // void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node) { assert(node->OperIsPutArg() || node->OperIsFieldList()); if (node->OperIsFieldList()) { JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n"); for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses()) { assert(operand.GetNode()->OperIsPutArg()); MoveCFGCallArg(call, operand.GetNode()); } } else { GenTree* operand = node->AsOp()->gtGetOp1(); JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n"); DISPTREE(operand); if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call)) { JITDUMP("...yes, moving to after validator call\n"); BlockRange().Remove(operand); BlockRange().InsertBefore(call, operand); } else { JITDUMP("...no, operand has side effects or is not invariant\n"); } } JITDUMP("Moving\n"); DISPTREE(node); JITDUMP("\n"); BlockRange().Remove(node); BlockRange().InsertBefore(call, node); } #ifndef TARGET_64BIT //------------------------------------------------------------------------ // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // // Notes: // This is done during lowering because DecomposeLongs handles only nodes // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values // but produce TYP_INT values. // GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) { assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG); GenTree* src1 = cmp->gtGetOp1(); GenTree* src2 = cmp->gtGetOp2(); assert(src1->OperIs(GT_LONG)); assert(src2->OperIs(GT_LONG)); GenTree* loSrc1 = src1->gtGetOp1(); GenTree* hiSrc1 = src1->gtGetOp2(); GenTree* loSrc2 = src2->gtGetOp1(); GenTree* hiSrc2 = src2->gtGetOp2(); BlockRange().Remove(src1); BlockRange().Remove(src2); genTreeOps condition = cmp->OperGet(); GenTree* loCmp; GenTree* hiCmp; if (cmp->OperIs(GT_EQ, GT_NE)) { // // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction. // // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when // the first happens to be a constant. Usually only the second compare operand is a constant but it's // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast // then hiSrc1 would be 0. 
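    // Worked example (illustrative): for (x NE 0L) both XORs drop out,
    // so we emit OR(x.lo, x.hi) with GTF_SET_FLAGS followed by a
    // SETCC/JCC NE, with no explicit compare against 0.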
        //
        if (loSrc1->OperIs(GT_CNS_INT))
        {
            std::swap(loSrc1, loSrc2);
        }

        if (loSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(loSrc2);
            loCmp = loSrc1;
        }
        else
        {
            loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
            BlockRange().InsertBefore(cmp, loCmp);
            ContainCheckBinary(loCmp->AsOp());
        }

        if (hiSrc1->OperIs(GT_CNS_INT))
        {
            std::swap(hiSrc1, hiSrc2);
        }

        if (hiSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(hiSrc2);
            hiCmp = hiSrc1;
        }
        else
        {
            hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
            BlockRange().InsertBefore(cmp, hiCmp);
            ContainCheckBinary(hiCmp->AsOp());
        }

        hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
        BlockRange().InsertBefore(cmp, hiCmp);
        ContainCheckBinary(hiCmp->AsOp());
    }
    else
    {
        assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));

        //
        // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
        // If the compare is unsigned we can still use SUB but we need to check the Carry flag,
        // not the actual result. In both cases we can simply check the appropriate condition flags
        // and ignore the actual result:
        //     SUB_LO loSrc1, loSrc2
        //     SUB_HI hiSrc1, hiSrc2
        //     SETCC|JCC (signed|unsigned LT|GE)
        // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
        // be turned into a CMP because the first SUB would have set carry to 0. This effectively
        // transforms a long compare against 0 into an int compare of the high part against 0.
        //
        // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
        // is greater than 0 is not so easy. We need to turn this into a positive/negative check
        // like the one we get for LT|GE compares, this can be achieved by swapping the compare:
        //     (x LE|GT y) becomes (y GE|LT x)
        //
        // Having to swap operands is problematic when the second operand is a constant. The constant
        // moves to the first operand where it cannot be contained and thus needs a register. This can
        // be avoided by changing the constant such that LE|GT becomes LT|GE:
        //     (x LE|GT 41) becomes (x LT|GE 42)
        //

        if (cmp->OperIs(GT_LE, GT_GT))
        {
            bool mustSwap = true;

            if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
            {
                uint32_t loValue  = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
                uint32_t hiValue  = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
                uint64_t value    = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
                uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;

                if (value != maxValue)
                {
                    value++;
                    loValue = value & UINT32_MAX;
                    hiValue = (value >> 32) & UINT32_MAX;
                    loSrc2->AsIntCon()->SetIconValue(loValue);
                    hiSrc2->AsIntCon()->SetIconValue(hiValue);

                    condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
                    mustSwap  = false;
                }
            }

            if (mustSwap)
            {
                std::swap(loSrc1, loSrc2);
                std::swap(hiSrc1, hiSrc2);
                condition = GenTree::SwapRelop(condition);
            }
        }

        assert((condition == GT_LT) || (condition == GT_GE));

        if (loSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(loSrc2);

            // Very conservative dead code removal... but it helps.
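            // (Only trivially side-effect-free leaves are deleted outright;
            // anything else is merely marked unused and left for liveness.)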
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(loSrc1); } else { loSrc1->SetUnusedValue(); } hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckCompare(hiCmp->AsOp()); } else { loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2); hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, loCmp, hiCmp); ContainCheckCompare(loCmp->AsOp()); ContainCheckBinary(hiCmp->AsOp()); // // Try to move the first SUB_HI operands right in front of it, this allows using // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do // this only for locals as they won't change condition flags. Note that we could // move constants (except 0 which generates XOR reg, reg) but it's extremely rare // to have a constant as the first operand. // if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(hiSrc1); BlockRange().InsertBefore(hiCmp, hiSrc1); } } } hiCmp->gtFlags |= GTF_SET_FLAGS; if (hiCmp->IsValue()) { hiCmp->SetUnusedValue(); } LIR::Use cmpUse; if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { BlockRange().Remove(cmp); GenTree* jcc = cmpUse.User(); jcc->AsOp()->gtOp1 = nullptr; jcc->ChangeOper(GT_JCC); jcc->gtFlags |= GTF_USE_FLAGS; jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } else { cmp->AsOp()->gtOp1 = nullptr; cmp->AsOp()->gtOp2 = nullptr; cmp->ChangeOper(GT_SETCC); cmp->gtFlags |= GTF_USE_FLAGS; cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } return cmp->gtNext; } #endif // !TARGET_64BIT //------------------------------------------------------------------------ // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations. // // Arguments: // cmp - the compare node // // Return Value: // The original compare node if lowering should proceed as usual or the next node // to lower if the compare node was changed in such a way that lowering is no // longer needed. // // Notes: // - Narrow operands to enable memory operand containment (XARCH specific). // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added). // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific) // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the // condition flags appropriately (XARCH/ARM64 specific but could be extended // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS). // GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(cmp->gtGetOp2()->IsIntegralConst()); #if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* op1 = cmp->gtGetOp1(); GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon(); ssize_t op2Value = op2->IconValue(); #ifdef TARGET_XARCH var_types op1Type = op1->TypeGet(); if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value)) { // // If op1's type is small then try to narrow op2 so it has the same type as op1. // Small types are usually used by memory loads and if both compare operands have // the same type then the memory load can be contained. In certain situations // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding. 
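    // E.g. (illustrative) comparing IND<ubyte>(addr) against CNS_INT 200:
    // retyping the constant to ubyte lets the load stay contained and emits
    // "cmp byte ptr [addr], 200" instead of a widening load plus 32-bit cmp.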
        //
        op2->gtType = op1Type;
    }
    else
#endif
        if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
    {
        GenTreeCast* cast       = op1->AsCast();
        var_types    castToType = cast->CastToType();
        GenTree*     castOp     = cast->gtGetOp1();

        if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
        {
            //
            // Since we're going to remove the cast we need to be able to narrow the cast operand
            // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
            // Some opers just can't be narrowed (e.g DIV, MUL) while others could be narrowed but
            // doing so would produce incorrect results (e.g. RSZ, RSH).
            //
            // The below list of handled opers is conservative but enough to handle the most common
            // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
            // the result of bool-returning calls.
            //
            bool removeCast =
#ifdef TARGET_ARM64
                (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
#endif
                (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND)
#ifdef TARGET_XARCH
                 || IsContainableMemoryOp(castOp)
#endif
                     );

            if (removeCast)
            {
                assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation

#ifdef TARGET_ARM64
                bool cmpEq = cmp->OperIs(GT_EQ);

                cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
                op2->SetIconValue(0xff);
                op2->gtType = castOp->gtType;
#else
                castOp->gtType = castToType;
                op2->gtType    = castToType;
#endif
                // If we have any contained memory ops on castOp, they must now not be contained.
                if (castOp->OperIs(GT_OR, GT_XOR, GT_AND))
                {
                    GenTree* op1 = castOp->gtGetOp1();
                    if ((op1 != nullptr) && !op1->IsCnsIntOrI())
                    {
                        op1->ClearContained();
                    }

                    GenTree* op2 = castOp->gtGetOp2();
                    if ((op2 != nullptr) && !op2->IsCnsIntOrI())
                    {
                        op2->ClearContained();
                    }
                }

                cmp->AsOp()->gtOp1 = castOp;

                BlockRange().Remove(cast);
            }
        }
    }
    else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
    {
        //
        // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
        //

        GenTree* andOp1 = op1->gtGetOp1();
        GenTree* andOp2 = op1->gtGetOp2();

        if (op2Value != 0)
        {
            //
            // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
            // into ((x AND mask) NE|EQ 0) when mask is a single bit.
            //
            if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
            {
                op2Value = 0;
                op2->SetIconValue(0);
                cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
            }
        }

        if (op2Value == 0)
        {
            BlockRange().Remove(op1);
            BlockRange().Remove(op2);

            cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
            cmp->AsOp()->gtOp1 = andOp1;
            cmp->AsOp()->gtOp2 = andOp2;
            // We will re-evaluate containment below
            andOp1->ClearContained();
            andOp2->ClearContained();

#ifdef TARGET_XARCH
            if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
            {
                //
                // For "test" we only care about the bits that are set in the second operand (mask).
                // If the mask fits in a small type then we can narrow both operands to generate a "test"
                // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
                // a widening load in some cases.
                //
                // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
                // the behavior of a previous implementation and avoids adding more cases where we generate
                // 16 bit instructions that require a length changing prefix (0x66). These suffer from
                // significant decoder stalls on Intel CPUs.
                //
                // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
                // In such cases morph narrows down the existing GT_AND by inserting a cast between it and
                // the memory operand so we'd need to add more code to recognize and eliminate that cast.
                //

                size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());

                if (FitsIn<UINT8>(mask))
                {
                    andOp1->gtType = TYP_UBYTE;
                    andOp2->gtType = TYP_UBYTE;
                }
                else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
                {
                    andOp1->gtType = TYP_USHORT;
                    andOp2->gtType = TYP_USHORT;
                }
            }
#endif
        }
    }

    if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
    {
#ifdef TARGET_XARCH
        //
        // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
        // results in smaller and faster code. It also doesn't have special register
        // requirements, unlike LSH that requires the shift count to be in ECX.
        // Note that BT has the same behavior as LSH when the bit index exceeds the
        // operand bit size - it uses (bit_index MOD bit_size).
        //

        GenTree* lsh = cmp->gtGetOp2();
        LIR::Use cmpUse;

        if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
            BlockRange().TryGetUse(cmp, &cmpUse))
        {
            GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;

            cmp->SetOper(GT_BT);
            cmp->gtType = TYP_VOID;
            cmp->gtFlags |= GTF_SET_FLAGS;
            cmp->AsOp()->gtOp2 = lsh->gtGetOp2();
            cmp->gtGetOp2()->ClearContained();

            BlockRange().Remove(lsh->gtGetOp1());
            BlockRange().Remove(lsh);

            GenTreeCC* cc;

            if (cmpUse.User()->OperIs(GT_JTRUE))
            {
                cmpUse.User()->ChangeOper(GT_JCC);
                cc              = cmpUse.User()->AsCC();
                cc->gtCondition = condition;
            }
            else
            {
                cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
                BlockRange().InsertAfter(cmp, cc);
                cmpUse.ReplaceWith(cc);
            }

            cc->gtFlags |= GTF_USE_FLAGS;

            return cmp->gtNext;
        }
#endif // TARGET_XARCH
    }
    else if (cmp->OperIs(GT_EQ, GT_NE))
    {
        GenTree* op1 = cmp->gtGetOp1();
        GenTree* op2 = cmp->gtGetOp2();

        // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
        // cases because in majority of cases op1, op2 and cmp would be in that order in
        // execution. In general we should be able to check that all the nodes that come
        // after op1 do not modify the flags so that it is safe to avoid generating a
        // test instruction.

        if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
#ifdef TARGET_XARCH
            (op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG)
#ifdef FEATURE_HW_INTRINSICS
             || (op1->OperIs(GT_HWINTRINSIC) &&
                 emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic())))
#endif // FEATURE_HW_INTRINSICS
                 )
#else // TARGET_ARM64
            op1->OperIs(GT_AND, GT_ADD, GT_SUB)
#endif
                )
        {
            op1->gtFlags |= GTF_SET_FLAGS;
            op1->SetUnusedValue();

            BlockRange().Remove(op2);

            GenTree*   next = cmp->gtNext;
            GenTree*   cc;
            genTreeOps ccOp;
            LIR::Use   cmpUse;

            // Fast check for the common case - relop used by a JTRUE that immediately follows it.
            if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
            {
                cc   = next;
                ccOp = GT_JCC;
                next = nullptr;
                BlockRange().Remove(cmp);
            }
            else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
            {
                cc   = cmpUse.User();
                ccOp = GT_JCC;
                next = nullptr;
                BlockRange().Remove(cmp);
            }
            else // The relop is not used by a JTRUE or it is not used at all.
            {
                // Transform the relop node into a SETCC. If it's not used we could remove
                // it completely but that means doing more work to handle a rare case.
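                // (The compare node itself is recycled into the SETCC below,
                // so no new node needs to be allocated for this path.)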
                cc   = cmp;
                ccOp = GT_SETCC;
            }

            GenCondition condition = GenCondition::FromIntegralRelop(cmp);
            cc->ChangeOper(ccOp);
            cc->AsCC()->gtCondition = condition;
            cc->gtFlags |= GTF_USE_FLAGS;

            return next;
        }
    }
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)

    return cmp;
}

//------------------------------------------------------------------------
// Lowering::LowerCompare: Lowers a compare node.
//
// Arguments:
//    cmp - the compare node
//
// Return Value:
//    The next node to lower.
//
GenTree* Lowering::LowerCompare(GenTree* cmp)
{
#ifndef TARGET_64BIT
    if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
    {
        return DecomposeLongCompare(cmp);
    }
#endif

    if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
    {
        GenTree* next = OptimizeConstCompare(cmp);

        // If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
        if (next != cmp)
        {
            return next;
        }
    }

#ifdef TARGET_XARCH
    if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
    {
        if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
        {
            //
            // If both operands have the same type then codegen will use the common operand type to
            // determine the instruction type. For small types this would result in performing a
            // signed comparison of two small unsigned values without zero extending them to TYP_INT
            // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
            // has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
            //

            cmp->gtFlags |= GTF_UNSIGNED;
        }
    }
#endif // TARGET_XARCH
    ContainCheckCompare(cmp->AsOp());
    return cmp->gtNext;
}

//------------------------------------------------------------------------
// Lowering::LowerJTrue: Lowers a JTRUE node.
//
// Arguments:
//    jtrue - the JTRUE node
//
// Return Value:
//    The next node to lower (usually nullptr).
//
// Notes:
//    On ARM64 this may remove the JTRUE node and transform its associated
//    relop into a JCMP node.
//
GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
{
#ifdef TARGET_ARM64
    GenTree* relop    = jtrue->gtGetOp1();
    GenTree* relopOp2 = relop->AsOp()->gtGetOp2();

    if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
    {
        bool         useJCMP = false;
        GenTreeFlags flags   = GTF_EMPTY;

        if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
        {
            // Codegen will use cbz or cbnz, which do not affect the flag register.
            flags   = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
            useJCMP = true;
        }
        else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
        {
            // Codegen will use tbz or tbnz, which do not affect the flag register.
            flags   = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY);
            useJCMP = true;
        }

        if (useJCMP)
        {
            relop->SetOper(GT_JCMP);
            relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
            relop->gtFlags |= flags;
            relop->gtType = TYP_VOID;

            relopOp2->SetContained();

            BlockRange().Remove(jtrue);

            assert(relop->gtNext == nullptr);
            return nullptr;
        }
    }
#endif // TARGET_ARM64

    ContainCheckJTrue(jtrue);

    assert(jtrue->gtNext == nullptr);
    return nullptr;
}

//----------------------------------------------------------------------------------------------
// LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags.
//
// Arguments:
//     node - The node to lower
//     condition - The condition code of the generated SETCC/JCC node
//
// Return Value:
//     A SETCC/JCC node or nullptr if `node` is not used.
//
// Notes:
//     This simply replaces `node`'s use with an appropriate SETCC/JCC node,
//     `node` is not actually changed, except by having its GTF_SET_FLAGS set.
//     It's the caller's responsibility to change `node` such that it only
//     sets the condition flags, without producing a boolean value.
//
GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition)
{
    // Skip over a chain of EQ/NE(x, 0) relops. This may be present either
    // because `node` is not a relop and so it cannot be used directly by a
    // JTRUE, or because the frontend failed to remove a EQ/NE(x, 0) that's
    // used as logical negation.
    //
    // Usually there's only one such relop but there's little difference
    // between removing one or all so we may as well remove them all.
    //
    // We can't allow any other nodes between `node` and its user because we
    // have no way of knowing if those nodes change flags or not. So we're looking
    // to skip over a sequence of appropriately connected zero and EQ/NE nodes.

    // The x in EQ/NE(x, 0)
    GenTree* relop = node;

    // The first node of the relop sequence
    GenTree* first = node->gtNext;

    // The node following the relop sequence
    GenTree* next = first;

    while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) &&
           next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) &&
           (next->gtNext->AsOp()->gtGetOp2() == next))
    {
        relop = next->gtNext;
        next  = relop->gtNext;

        if (relop->OperIs(GT_EQ))
        {
            condition = GenCondition::Reverse(condition);
        }
    }

    GenTreeCC* cc = nullptr;

    // Next may be null if `node` is not used. In that case we don't need to generate a SETCC node.
    if (next != nullptr)
    {
        if (next->OperIs(GT_JTRUE))
        {
            // If the instruction immediately following 'relop', i.e. 'next', is a conditional branch,
            // it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly
            // constructed IL (the setting of a condition code should always immediately precede its
            // use, since the JIT doesn't track dataflow for condition codes). Still, if it happens
            // it's not our problem, it simply means that `node` is not used and can be removed.
            if (next->AsUnOp()->gtGetOp1() == relop)
            {
                assert(relop->OperIsCompare());

                next->ChangeOper(GT_JCC);
                cc              = next->AsCC();
                cc->gtCondition = condition;
            }
        }
        else
        {
            // If the node is used by something other than a JTRUE then we need to insert a
            // SETCC node to materialize the boolean value.
            LIR::Use use;

            if (BlockRange().TryGetUse(relop, &use))
            {
                cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
                BlockRange().InsertAfter(node, cc);
                use.ReplaceWith(cc);
            }
        }
    }

    if (cc != nullptr)
    {
        node->gtFlags |= GTF_SET_FLAGS;
        cc->gtFlags |= GTF_USE_FLAGS;
    }

    // Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was
    // inserted after `node`, `first` still points to the node that was initially
    // after `node`.
    if (relop != node)
    {
        BlockRange().Remove(first, relop);
    }

    return cc;
}

// Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
void Lowering::LowerJmpMethod(GenTree* jmp)
{
    assert(jmp->OperGet() == GT_JMP);

    JITDUMP("lowering GT_JMP\n");
    DISPNODE(jmp);
    JITDUMP("============");

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
    }
}

// Lower GT_RETURN node to insert PInvoke method epilog if required.
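// (LowerRet also retypes the return value when register files differ: e.g. a
// method returning float whose source node is int-typed gets a GT_BITCAST so
// the value ends up in a floating-point register; see the bitcast path below.)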
void Lowering::LowerRet(GenTreeUnOp* ret)
{
    assert(ret->OperGet() == GT_RETURN);

    JITDUMP("lowering GT_RETURN\n");
    DISPNODE(ret);
    JITDUMP("============");

    GenTree* retVal = ret->gtGetOp1();

    // There are two kinds of retyping:
    // - A simple bitcast can be inserted when:
    //   - We're returning a floating type as an integral type or vice-versa, or
    // - If we're returning a struct as a primitive type, we change the type of
    //   'retval' in 'LowerRetSingleRegStructLclVar()'.
    bool needBitcast =
        (ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1()));
    bool doPrimitiveBitcast = false;
    if (needBitcast)
    {
        doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
    }

    if (doPrimitiveBitcast)
    {
        // Add a simple bitcast when both types are not structs.
        // If one type is a struct it will be handled below.
#if defined(DEBUG)
        assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
#endif

        GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
        ret->gtOp1       = bitcast;
        BlockRange().InsertBefore(ret, bitcast);
        ContainCheckBitCast(bitcast);
    }
    else if (ret->TypeGet() != TYP_VOID)
    {
#if FEATURE_MULTIREG_RET
        if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal))
        {
            ReturnTypeDesc retTypeDesc;
            LclVarDsc*     varDsc = nullptr;
            varDsc                = comp->lvaGetDesc(retVal->AsLclVar());
            retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv);
            if (retTypeDesc.GetReturnRegCount() > 1)
            {
                CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
            }
        }
#endif // FEATURE_MULTIREG_RET

#ifdef DEBUG
        if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
        {
            if (varTypeIsStruct(ret->TypeGet()))
            {
                assert(comp->info.compRetNativeType != TYP_STRUCT);

                var_types retActualType    = genActualType(comp->info.compRetNativeType);
                var_types retValActualType = genActualType(retVal->TypeGet());

                bool constStructInit                  = retVal->IsConstInitVal();
                bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType));

                // This could happen if we have retyped op1 as a primitive type during struct promotion,
                // check `retypedFieldsMap` for details.
                bool actualTypesMatch = (retActualType == retValActualType);

                assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize);
            }
        }
#endif // DEBUG

        if (varTypeIsStruct(ret))
        {
            LowerRetStruct(ret);
        }
        else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal))
        {
            // Return struct as a primitive using Unsafe cast.
            assert(retVal->OperIs(GT_LCL_VAR));
            LowerRetSingleRegStructLclVar(ret);
        }
    }

    // Method doing PInvokes has exactly one return block unless it has tail calls.
    if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
    }
    ContainCheckRet(ret);
}

//----------------------------------------------------------------------------------------------
// LowerStoreLocCommon: platform independent part of local var or field store lowering.
//
// Arguments:
//     lclStore - The store lcl node to lower.
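//
// Notes:
//     Handles retyping of struct stores (including conversion to STORE_OBJ when
//     the local cannot be enregistered), multi-reg stores, and insertion of a
//     bitcast when the source and destination use different register files.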
// void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) { assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR)); JITDUMP("lowering store lcl var/field (before):\n"); DISPTREERANGE(BlockRange(), lclStore); JITDUMP("\n"); GenTree* src = lclStore->gtGetOp1(); LclVarDsc* varDsc = comp->lvaGetDesc(lclStore); const bool srcIsMultiReg = src->IsMultiRegNode(); const bool dstIsMultiReg = lclStore->IsMultiRegLclVar(); if (!dstIsMultiReg && varTypeIsStruct(varDsc)) { // TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`, // but we do not set it for `CSE` vars so it is currently failing. assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted); if (varDsc->CanBeReplacedWithItsField(comp)) { assert(varDsc->lvFieldCnt == 1); unsigned fldNum = varDsc->lvFieldLclStart; LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store " "from a call [%06u]\n", lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore)); lclStore->SetLclNum(fldNum); lclStore->ChangeType(fldDsc->TypeGet()); varDsc = fldDsc; } } if (srcIsMultiReg || dstIsMultiReg) { const ReturnTypeDesc* retTypeDesc = nullptr; if (src->OperIs(GT_CALL)) { retTypeDesc = src->AsCall()->GetReturnTypeDesc(); } CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc); } const var_types lclRegType = varDsc->GetRegisterType(lclStore); if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg) { bool convertToStoreObj; if (src->OperGet() == GT_CALL) { GenTreeCall* call = src->AsCall(); const ClassLayout* layout = varDsc->GetLayout(); #ifdef DEBUG const unsigned slotCount = layout->GetSlotCount(); #if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI) // Windows x64 doesn't have multireg returns, // x86 uses it only for long return type, not for structs. assert(slotCount == 1); assert(lclRegType != TYP_UNDEF); #else // !TARGET_XARCH || UNIX_AMD64_ABI if (!varDsc->lvIsHfa()) { if (slotCount > 1) { assert(call->HasMultiRegRetVal()); } else { unsigned size = layout->GetSize(); assert((size <= 8) || (size == 16)); bool isPowerOf2 = (((size - 1) & size) == 0); bool isTypeDefined = (lclRegType != TYP_UNDEF); assert(isPowerOf2 == isTypeDefined); } } #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // DEBUG #if !defined(WINDOWS_AMD64_ABI) if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF)) { // If we have a single return register, // but we can't retype it as a primitive type, we must spill it. GenTreeLclVar* spilledCall = SpillStructCallResult(call); lclStore->gtOp1 = spilledCall; src = lclStore->gtOp1; JITDUMP("lowering store lcl var/field has to spill call src.\n"); LowerStoreLocCommon(lclStore); return; } #endif // !WINDOWS_AMD64_ABI convertToStoreObj = false; } else if (!varDsc->IsEnregisterableType()) { convertToStoreObj = true; } else if (src->OperIs(GT_CNS_INT)) { assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init."); #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclRegType)) { CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Lie about the type if we don't know/have it. 
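                    // (This is assumed safe here: the source is a zero constant, and a
                    // zero init writes the same bit pattern whatever the base type is.)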
                    simdBaseJitType = CORINFO_TYPE_FLOAT;
                }
                GenTreeSIMD* simdTree =
                    comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType, varDsc->lvExactSize);
                BlockRange().InsertAfter(src, simdTree);
                LowerSIMD(simdTree);
                src             = simdTree;
                lclStore->gtOp1 = src;
                convertToStoreObj = false;
            }
            else
#endif // FEATURE_SIMD
            {
                convertToStoreObj = false;
            }
        }
        else if (!src->OperIs(GT_LCL_VAR))
        {
            convertToStoreObj = true;
        }
        else
        {
            assert(src->OperIs(GT_LCL_VAR));
            convertToStoreObj = false;
        }

        if (convertToStoreObj)
        {
            const unsigned lclNum = lclStore->GetLclNum();
            GenTreeLclVar* addr   = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF);
            comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp));

            addr->gtFlags |= GTF_VAR_DEF;
            assert(!addr->IsPartialLclFld(comp));
            addr->gtFlags |= GTF_DONT_CSE;

            // Create the assignment node.
            lclStore->ChangeOper(GT_STORE_OBJ);
            GenTreeBlk* objStore = lclStore->AsObj();
            // Only the GTF_LATE_ARG flag (if present) is preserved.
            objStore->gtFlags &= GTF_LATE_ARG;
            objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
#ifndef JIT32_GCENCODER
            objStore->gtBlkOpGcUnsafe = false;
#endif
            objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
            objStore->SetLayout(varDsc->GetLayout());
            objStore->SetAddr(addr);
            objStore->SetData(src);
            BlockRange().InsertBefore(objStore, addr);
            LowerBlockStoreCommon(objStore);
            return;
        }
    }

    // src and dst can be in registers, check if we need a bitcast.
    if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src)))
    {
        assert(!srcIsMultiReg && !dstIsMultiReg);
        assert(lclStore->OperIsLocalStore());
        assert(lclRegType != TYP_UNDEF);

        GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src);
        lclStore->gtOp1  = bitcast;
        src              = lclStore->gtGetOp1();
        BlockRange().InsertBefore(lclStore, bitcast);
        ContainCheckBitCast(bitcast);
    }

    LowerStoreLoc(lclStore);
    JITDUMP("lowering store lcl var/field (after):\n");
    DISPTREERANGE(BlockRange(), lclStore);
    JITDUMP("\n");
}

//----------------------------------------------------------------------------------------------
// LowerRetStruct: Lowers a struct return node.
//
// Arguments:
//     node - The return node to lower.
//
void Lowering::LowerRetStruct(GenTreeUnOp* ret)
{
#ifdef TARGET_ARM64
    if (GlobalJitOptions::compFeatureHfa)
    {
        if (varTypeIsSIMD(ret))
        {
            if (comp->info.compRetNativeType == TYP_STRUCT)
            {
                assert(varTypeIsSIMD(ret->gtGetOp1()));
                assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
                ret->ChangeType(comp->info.compRetNativeType);
            }
            else
            {
                assert(comp->info.compRetNativeType == ret->TypeGet());
                GenTree* retVal = ret->gtGetOp1();
                if (retVal->TypeGet() != ret->TypeGet())
                {
                    assert(retVal->OperIs(GT_LCL_VAR));
                    LowerRetSingleRegStructLclVar(ret);
                }
                return;
            }
        }
    }
#endif // TARGET_ARM64

    if (comp->compMethodReturnsMultiRegRegTypeAlternate())
    {
        return;
    }

    assert(ret->OperIs(GT_RETURN));
    assert(varTypeIsStruct(ret));

    GenTree* retVal = ret->gtGetOp1();
    // Note: small types are returned as INT.
    var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
    ret->ChangeType(nativeReturnType);

    switch (retVal->OperGet())
    {
        case GT_CALL:
            assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
            break;

        case GT_CNS_INT:
            // When we promote LCL_VAR single fields into return
            // we could have all types of constants here.
            if (varTypeUsesFloatReg(nativeReturnType))
            {
                // Do not expect `initblock` for SIMD* types,
                // only 'initobj'.
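                // ('initobj' zeroes the struct, so the only constant expected here is 0;
                // it is re-bashed below to a floating-point zero so the value can be
                // returned in a floating-point register.)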
assert(retVal->AsIntCon()->IconValue() == 0); retVal->BashToConst(0.0, TYP_FLOAT); } break; case GT_OBJ: retVal->ChangeOper(GT_IND); FALLTHROUGH; case GT_IND: retVal->ChangeType(nativeReturnType); LowerIndir(retVal->AsIndir()); break; case GT_LCL_VAR: LowerRetSingleRegStructLclVar(ret); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #ifdef FEATURE_SIMD case GT_SIMD: #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: #endif // FEATURE_HW_INTRINSICS { assert(!retVal->TypeIs(TYP_STRUCT)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } break; #endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS case GT_LCL_FLD: { #ifdef DEBUG LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld()); assert(varDsc->lvDoNotEnregister); #endif retVal->ChangeType(nativeReturnType); } break; default: assert(varTypeIsEnregisterable(retVal)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } break; } } //---------------------------------------------------------------------------------------------- // LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source. // // Arguments: // node - The return node to lower. // // Notes: // - the function is only for LclVars that are returned in one register; // - if LclVar is allocated in memory then read it as return type; // - if LclVar can be enregistered read it as register type and add a bitcast if necessary; // void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret) { assert(!comp->compMethodReturnsMultiRegRegTypeAlternate()); assert(ret->OperIs(GT_RETURN)); GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar(); assert(lclVar->OperIs(GT_LCL_VAR)); unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); if (varDsc->lvPromoted) { // TODO-1stClassStructs: We can no longer independently promote // or enregister this struct, since it is referenced as a whole. comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } if (varDsc->lvDoNotEnregister) { lclVar->ChangeOper(GT_LCL_FLD); lclVar->AsLclFld()->SetLclOffs(0); // We are returning as a primitive type and the lcl is of struct type. assert(comp->info.compRetNativeType != TYP_STRUCT); assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) || (varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) && (genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret)))); // If the actual return type requires normalization, then make sure we // do so by using the correct small type for the GT_LCL_FLD. It would // be conservative to check just compRetNativeType for this since small // structs are normalized to primitive types when they are returned in // registers, so we would normalize for them as well. if (varTypeIsSmall(comp->info.compRetType)) { assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType)); lclVar->ChangeType(comp->info.compRetType); } else { // Otherwise we don't mind that we leave the upper bits undefined. 
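            // (For example, a 3 byte struct read back as TYP_INT: the high byte read
            // from the frame slot is garbage, but the caller only observes 3 bytes.)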
            lclVar->ChangeType(ret->TypeGet());
        }
    }
    else
    {
        const var_types lclVarType = varDsc->GetRegisterType(lclVar);
        assert(lclVarType != TYP_UNDEF);

        const var_types actualType = genActualType(lclVarType);
        lclVar->ChangeType(actualType);

        if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType))
        {
            GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1);
            ret->gtOp1       = bitcast;
            BlockRange().InsertBefore(ret, bitcast);
            ContainCheckBitCast(bitcast);
        }
    }
}

//----------------------------------------------------------------------------------------------
// LowerCallStruct: Lowers a call node that returns a struct.
//
// Arguments:
//     call - The call node to lower.
//
// Notes:
//    - this handles only single-register returns;
//    - it transforms the call's user for `GT_STOREIND`.
//
void Lowering::LowerCallStruct(GenTreeCall* call)
{
    assert(varTypeIsStruct(call));
    if (call->HasMultiRegRetVal())
    {
        return;
    }

    if (GlobalJitOptions::compFeatureHfa)
    {
        if (comp->IsHfa(call))
        {
#if defined(TARGET_ARM64)
            assert(comp->GetHfaCount(call) == 1);
#elif defined(TARGET_ARM)
            // ARM returns double in 2 float registers, but
            // `call->HasMultiRegRetVal()` counts double registers.
            assert(comp->GetHfaCount(call) <= 2);
#else  // !TARGET_ARM64 && !TARGET_ARM
            NYI("Unknown architecture");
#endif // !TARGET_ARM64 && !TARGET_ARM
            var_types hfaType = comp->GetHfaType(call);
            if (call->TypeIs(hfaType))
            {
                return;
            }
        }
    }

    CORINFO_CLASS_HANDLE        retClsHnd = call->gtRetClsHnd;
    Compiler::structPassingKind howToReturnStruct;
    var_types                   returnType =
        comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
    assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);
    var_types origType = call->TypeGet();
    call->gtType       = genActualType(returnType);

    LIR::Use callUse;
    if (BlockRange().TryGetUse(call, &callUse))
    {
        GenTree* user = callUse.User();
        switch (user->OperGet())
        {
            case GT_RETURN:
            case GT_STORE_LCL_VAR:
            case GT_STORE_BLK:
            case GT_STORE_OBJ:
                // Leave as is, the user will handle it.
                assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet()));
                break;

#ifdef FEATURE_SIMD
            case GT_STORE_LCL_FLD:
                // If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type.
                // If not, the user type should match the struct's returnType.
                assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet()));
                break;
#endif // FEATURE_SIMD

            case GT_STOREIND:
#ifdef FEATURE_SIMD
                if (varTypeIsSIMD(user))
                {
                    user->ChangeType(returnType);
                    break;
                }
#endif // FEATURE_SIMD
                // importer has a separate mechanism to retype calls to helpers,
                // keep it for now.
                assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI)));
                assert(call->IsHelperCall());
                assert(returnType == user->TypeGet());
                break;

            default:
                unreached();
        }
    }
}

//----------------------------------------------------------------------------------------------
// LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call.
//
// Arguments:
//     store - The store node to lower.
//
// Notes:
//    - the function is only for calls that return one register;
//    - it spills the call's result if it cannot be retyped as a primitive type;
//
void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store)
{
    assert(store->Data()->IsCall());
    GenTreeCall* call = store->Data()->AsCall();
    assert(!call->HasMultiRegRetVal());

    const ClassLayout* layout  = store->GetLayout();
    const var_types    regType = layout->GetRegisterType();

    if (regType != TYP_UNDEF)
    {
        store->ChangeType(regType);
        store->SetOper(GT_STOREIND);
        LowerStoreIndirCommon(store->AsStoreInd());
        return;
    }
    else
    {
#if defined(WINDOWS_AMD64_ABI)
        // All ABIs except Windows x64 support passing 3 byte structs in registers.
        // The other 64-bit ABIs also support passing 5, 6 and 7 byte structs.
        unreached();
#else  // !WINDOWS_AMD64_ABI
        if (store->OperIs(GT_STORE_OBJ))
        {
            store->SetOper(GT_STORE_BLK);
        }
        store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll;

        GenTreeLclVar* spilledCall = SpillStructCallResult(call);
        store->SetData(spilledCall);
        LowerBlockStoreCommon(store);
#endif // WINDOWS_AMD64_ABI
    }
}

#if !defined(WINDOWS_AMD64_ABI)
//----------------------------------------------------------------------------------------------
// SpillStructCallResult: Spill call result to memory.
//
// Arguments:
//     call - call with a 3, 5, 6 or 7 byte return size that has to be spilled to memory.
//
// Return Value:
//     load of the spilled variable.
//
GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const
{
    // TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps.
    const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size"));
    comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField));
    CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
    comp->lvaSetStruct(spillNum, retClsHnd, false);
    GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0);
    spill->gtOp1         = call;
    spill->gtFlags |= GTF_VAR_DEF;

    BlockRange().InsertAfter(call, spill);
    ContainCheckStoreLoc(spill);
    GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar();
    BlockRange().InsertAfter(spill, loadCallResult);
    return loadCallResult;
}
#endif // !WINDOWS_AMD64_ABI

GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);

    // Non-virtual direct/indirect calls: Work out if the address of the
    // call is known at JIT time. If not it is either an indirect call
    // or the address must be accessed via a single/double indirection.
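    // The accessType computed below says how many loads are needed to reach the
    // actual code address: IAT_VALUE - none, IAT_PVALUE - one, IAT_PPVALUE - two,
    // IAT_RELPVALUE - one load plus an add of the cell address.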
void* addr; InfoAccessType accessType; CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd); #ifdef FEATURE_READYTORUN if (call->gtEntryPoint.addr != nullptr) { accessType = call->gtEntryPoint.accessType; addr = call->gtEntryPoint.addr; } else #endif if (call->gtCallType == CT_HELPER) { noway_assert(helperNum != CORINFO_HELP_UNDEF); // the convention on getHelperFtn seems to be (it's not documented) // that it returns an address or if it returns null, pAddr is set to // another address, which requires an indirection void* pAddr; addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr); if (addr != nullptr) { assert(pAddr == nullptr); accessType = IAT_VALUE; } else { accessType = IAT_PVALUE; addr = pAddr; } } else { noway_assert(helperNum == CORINFO_HELP_UNDEF); CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY; if (call->IsSameThis()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS); } if (!call->NeedsNullCheck()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL); } CORINFO_CONST_LOOKUP addrInfo; comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags); accessType = addrInfo.accessType; addr = addrInfo.addr; } GenTree* result = nullptr; switch (accessType) { case IAT_VALUE: // Non-virtual direct call to known address. // For JIT helper based tailcall (only used on x86) the target // address is passed as an arg to the helper so we want a node for // it. if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper()) { result = AddrGen(addr); } else { // a direct call within range of hardware relative call instruction // stash the address for codegen call->gtDirectCallAddress = addr; } break; case IAT_PVALUE: { // If we are using an indirection cell for a direct call then apply // an optimization that loads the call target directly from the // indirection cell, instead of duplicating the tree. bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None; if (!hasIndirectionCell) { // Non-virtual direct calls to addresses accessed by // a single indirection. GenTree* cellAddr = AddrGen(addr); #ifdef DEBUG cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif GenTree* indir = Ind(cellAddr); result = indir; } break; } case IAT_PPVALUE: // Non-virtual direct calls to addresses accessed by // a double indirection. // // Expanding an IAT_PPVALUE here, will lose the opportunity // to Hoist/CSE the first indirection as it is an invariant load // assert(!"IAT_PPVALUE case in LowerDirectCall"); noway_assert(helperNum == CORINFO_HELP_UNDEF); result = AddrGen(addr); // Double-indirection. Load the address into a register // and call indirectly through the register // result = Ind(Ind(result)); break; case IAT_RELPVALUE: { // Non-virtual direct calls to addresses accessed by // a single relative indirection. 
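            // The cell at `addr` holds an offset relative to the cell itself, so the
            // final target is computed below as [addr] + addr.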
GenTree* cellAddr = AddrGen(addr); GenTree* indir = Ind(cellAddr); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr)); break; } default: noway_assert(!"Bad accessType"); break; } return result; } GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC); assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) & (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)); GenTree* thisArgNode; if (call->IsTailCallViaJitHelper()) { const unsigned argNum = 0; fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum); thisArgNode = thisArgTabEntry->GetNode(); } else { thisArgNode = comp->gtGetThisArg(call); } assert(thisArgNode != nullptr); assert(thisArgNode->gtOper == GT_PUTARG_REG); GenTree* thisExpr = thisArgNode->AsOp()->gtOp1; // We're going to use the 'this' expression multiple times, so make a local to copy it. GenTree* base; if (thisExpr->OperIs(GT_LCL_VAR)) { base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet()); } else if (thisExpr->OperIs(GT_LCL_FLD)) { base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(), thisExpr->AsLclFld()->GetLclOffs()); } else { unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call")); base = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet()); LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode); ReplaceWithLclVar(thisExprUse, delegateInvokeTmp); thisExpr = thisExprUse.Def(); // it's changed; reload it. } // replace original expression feeding into thisPtr with // [originalThis + offsetOfDelegateInstance] GenTree* newThisAddr = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance); GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr); BlockRange().InsertAfter(thisExpr, newThisAddr, newThis); thisArgNode->AsOp()->gtOp1 = newThis; ContainCheckIndir(newThis->AsIndir()); // the control target is // [originalThis + firstTgtOffs] unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget; GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs); GenTree* callTarget = Ind(result); // don't need to sequence and insert this tree, caller will do it return callTarget; } GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call) { #ifdef TARGET_X86 if (call->gtCallCookie != nullptr) { NYI_X86("Morphing indirect non-virtual call with non-standard args"); } #endif // Indirect cookie calls gets transformed by fgMorphArgs as indirect call with non-standard args. // Hence we should never see this type of call in lower. noway_assert(call->gtCallCookie == nullptr); return nullptr; } //------------------------------------------------------------------------ // CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke // epilogs to invoke a GC under a condition. The return trap checks some global // location (the runtime tells us where that is and how many indirections to make), // then, based on the result, conditionally calls a GC helper. We use a special node // for this because at this time (late in the compilation phases), introducing flow // is tedious/difficult. // // This is used for PInvoke inlining. // // Return Value: // Code tree to perform the action. 
// GenTree* Lowering::CreateReturnTrapSeq() { // The GT_RETURNTRAP node expands to this: // if (g_TrapReturningThreads) // { // RareDisablePreemptiveGC(); // } // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'. void* pAddrOfCaptureThreadGlobal = nullptr; int32_t* addrOfCaptureThreadGlobal = comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal); GenTree* testTree; if (addrOfCaptureThreadGlobal != nullptr) { testTree = AddrGen(addrOfCaptureThreadGlobal); } else { testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal)); } return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT)); } //------------------------------------------------------------------------ // SetGCState: Create a tree that stores the given constant (0 or 1) into the // thread's GC state field. // // This is used for PInvoke inlining. // // Arguments: // state - constant (0 or 1) to store into the thread's GC state field. // // Return Value: // Code tree to perform the action. // GenTree* Lowering::SetGCState(int state) { // Thread.offsetOfGcState = 0/1 assert(state == 0 || state == 1); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state); GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState); GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode); return storeGcState; } //------------------------------------------------------------------------ // CreateFrameLinkUpdate: Create a tree that either links or unlinks the // locally-allocated InlinedCallFrame from the Frame list. // // This is used for PInvoke inlining. // // Arguments: // action - whether to link (push) or unlink (pop) the Frame // // Return Value: // Code tree to perform the action. // GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action) { const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); // Thread->m_pFrame GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame); GenTree* data = nullptr; if (action == PushFrame) { // Thread->m_pFrame = &inlinedCallFrame; data = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); } else { assert(action == PopFrame); // Thread->m_pFrame = inlinedCallFrame.m_pNext; data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameLink); } GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data); return storeInd; } //------------------------------------------------------------------------ // InsertPInvokeMethodProlog: Create the code that runs at the start of // every method that has PInvoke calls. // // Initialize the TCB local and the InlinedCallFrame object. Then link ("push") // the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame // is defined in vm/frames.h. See also vm/jitinterface.cpp for more information. // The offsets of these fields is returned by the VM in a call to ICorStaticInfo::getEEInfo(). 
// // The (current) layout is as follows: // // 64-bit 32-bit CORINFO_EE_INFO // offset offset field name offset when set // ----------------------------------------------------------------------------------------- // +00h +00h GS cookie offsetOfGSCookie // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog // +10h +08h m_Next offsetOfFrameLink method prolog // +18h +0Ch m_Datum offsetOfCallTarget call site // +20h n/a m_StubSecretArg not set by JIT // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method // prolog; // non-x86: method prolog (SP remains // constant in function, after prolog: no // localloc and PInvoke in same function) // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT // +1Ch m_pThread // +20h m_pSPAfterProlog offsetOfSPAfterProlog arm only // +20/24h JIT retval spill area (int) before call_gc ??? // +24/28h JIT retval spill area (long) before call_gc ??? // +28/2Ch Saved value of EBP method prolog ??? // // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before* // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location, // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie. // // Return Value: // none // void Lowering::InsertPInvokeMethodProlog() { noway_assert(comp->info.compUnmanagedCallCountWithGCTransition); noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method prolog\n"); // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog. assert(comp->fgFirstBBisScratch()); LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list: // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg); // for x86, don't pass the secretArg. 
    CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_X86) || defined(TARGET_ARM)
    GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr);
#else
    GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
#endif

    GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);

    // some sanity checks on the frame list root vardsc
    const unsigned   lclNum = comp->info.compLvFrameListRoot;
    const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
    noway_assert(!varDsc->lvIsParam);
    noway_assert(varDsc->lvType == TYP_I_IMPL);

    GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum);
    store->AsOp()->gtOp1 = call;
    store->gtFlags |= GTF_VAR_DEF;

    GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode();

    comp->fgMorphTree(store);
    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
    DISPTREERANGE(firstBlockRange, store);

#if !defined(TARGET_X86) && !defined(TARGET_ARM)
    // For x86, this step is done at the call site (due to stack pointer not being static in the function).
    // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.

    // --------------------------------------------------------
    // InlinedCallFrame.m_pCallSiteSP = @RSP;

    GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
        GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
    storeSP->gtOp1 = PhysReg(REG_SPBASE);
    storeSP->gtFlags |= GTF_VAR_DEF;

    assert(inlinedPInvokeDsc->lvDoNotEnregister);

    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
    DISPTREERANGE(firstBlockRange, storeSP);

#endif // !defined(TARGET_X86) && !defined(TARGET_ARM)

#if !defined(TARGET_ARM)
    // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.

    // --------------------------------------------------------
    // InlinedCallFrame.m_pCalleeSavedEBP = @RBP;

    GenTreeLclFld* storeFP =
        new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
                                                   callFrameInfo.offsetOfCalleeSavedFP);
    assert(inlinedPInvokeDsc->lvDoNotEnregister);

    storeFP->gtOp1 = PhysReg(REG_FPBASE);
    storeFP->gtFlags |= GTF_VAR_DEF;

    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
    DISPTREERANGE(firstBlockRange, storeFP);
#endif // !defined(TARGET_ARM)

    // --------------------------------------------------------
    // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
    // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
    {
        // Push a frame - if we are NOT in an IL stub, this is done right before the call
        // The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
        GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
        firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
        ContainCheckStoreIndir(frameUpd->AsStoreInd());
        DISPTREERANGE(firstBlockRange, frameUpd);
    }
#endif // TARGET_64BIT
}

//------------------------------------------------------------------------
// InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
// that has PInvoke inlines. This needs to be inserted any place you can exit the
// function: returns, tailcalls and jmps.
//
// Arguments:
//    returnBB   -  basic block from which a method can return
//    lastExpr   -  GenTree of the last top level statement of returnBB (debug only arg)
//
// Return Value:
//    Code tree to perform the action.
//
void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
{
    assert(returnBB != nullptr);
    assert(comp->info.compUnmanagedCallCountWithGCTransition);

    if (comp->opts.ShouldUsePInvokeHelpers())
    {
        return;
    }

    JITDUMP("======= Inserting PInvoke method epilog\n");

    // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
    assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
           returnBB->endsWithTailCallOrJmp(comp));

    LIR::Range& returnBlockRange = LIR::AsRange(returnBB);

    GenTree* insertionPoint = returnBlockRange.LastNode();
    assert(insertionPoint == lastExpr);

    // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
    // order so that it is guaranteed that there will be no further PInvokes after that point in the method.
    //
    // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be
    //           Op1, PME, GT_RETURN
    //
    // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
    //           arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
    //           After inserting PME execution order would be:
    //           arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
    //
    // Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP
    //           That is, after the PME the args for the GT_JMP call will be set up.

    // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
    // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
#endif // TARGET_64BIT
    {
        GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
        returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
        ContainCheckStoreIndir(frameUpd->AsStoreInd());
    }
}

//------------------------------------------------------------------------
// InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
// It does all the necessary call-site setup of the InlinedCallFrame.
//
// Arguments:
//    call - the call for which we are inserting the PInvoke prolog.
//
// Return Value:
//    None.
//
void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
{
    JITDUMP("======= Inserting PInvoke call prolog\n");

    GenTree* insertBefore = call;
    if (call->gtCallType == CT_INDIRECT)
    {
        bool isClosed;
        insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
        assert(isClosed);
    }

    const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;

    gtCallTypes callType = (gtCallTypes)call->gtCallType;

    noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);

    if (comp->opts.ShouldUsePInvokeHelpers())
    {
        // First argument is the address of the frame variable.
        GenTree* frameAddr =
            new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);

#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
        // On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum.
        // This is because the callee pops stack arguments, and we need to keep track of this during stack
        // walking
        const unsigned    numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset();
        GenTree*          stackBytes     = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
        GenTreeCall::Use* args           = comp->gtNewCallArgs(frameAddr, stackBytes);
#else
        GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr);
#endif

        // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
        GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args);

        comp->fgMorphTree(helperCall);
        BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
        LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
        return;
    }

    // Emit the following sequence:
    //
    // InlinedCallFrame.callTarget = methodHandle   // stored in m_Datum
    // InlinedCallFrame.m_pCallSiteSP = SP          // x86 only
    // InlinedCallFrame.m_pCallerReturnAddress = return address
    // GT_START_PREEMPTGC
    // Thread.gcState = 0
    // (non-stub) - update top Frame on TCB         // 64-bit targets only

    // ----------------------------------------------------------------------------------
    // Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
    // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.

    GenTree* src = nullptr;

    if (callType == CT_INDIRECT)
    {
#if !defined(TARGET_64BIT)
        // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
        const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset();
        src                            = comp->gtNewIconNode(stackByteOffset, TYP_INT);
#else
        // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
        // If the stub parameter value is not needed, m_Datum will be initialized by the VM.
        if (comp->info.compPublishStubParam)
        {
            src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
        }
#endif // !defined(TARGET_64BIT)
    }
    else
    {
        assert(callType == CT_USER_FUNC);

        void*                 pEmbedMethodHandle = nullptr;
        CORINFO_METHOD_HANDLE embedMethodHandle =
            comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);

        noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));

        if (embedMethodHandle != nullptr)
        {
            // InlinedCallFrame.callSiteTarget = methodHandle
            src = AddrGen(embedMethodHandle);
        }
        else
        {
            // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
            src = Ind(AddrGen(pEmbedMethodHandle));
        }
    }

    if (src != nullptr)
    {
        // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
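        // (Depending on the call type and target, m_Datum holds the method handle,
        // the outgoing stack arg size, or the secret stub parameter - see above.)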
GenTreeLclFld* store = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallTarget); store->gtOp1 = src; store->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, store); } #ifdef TARGET_X86 // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = SP GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE); storeCallSiteSP->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP); #endif // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call) GenTreeLclFld* storeLab = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); storeLab->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeLab); // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method // contains PInvokes; on 64-bit targets this is necessary in non-stubs. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Set the TCB's frame to be the one we just created. // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME) // has prepended it to the linked list to maintain the stack of Frames. // // Stubs do this once per stub, not once per call. GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } #endif // TARGET_64BIT // IMPORTANT **** This instruction must be the last real instruction **** // It changes the thread's state to Preemptive mode // ---------------------------------------------------------------------------------- // [tcb + offsetOfGcState] = 0 GenTree* storeGCState = SetGCState(0); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState)); ContainCheckStoreIndir(storeGCState->AsStoreInd()); // Indicate that codegen has switched this thread to preemptive GC. // This tree node doesn't generate any code, but impacts LSRA and gc reporting. // This tree node is simple so doesn't require sequencing. GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID); BlockRange().InsertBefore(insertBefore, preemptiveGCNode); } //------------------------------------------------------------------------ // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call. // // Arguments: // call - the call for which we are inserting the PInvoke epilog. // // Return Value: // None. // void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call epilog\n"); if (comp->opts.ShouldUsePInvokeHelpers()) { noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); // First argument is the address of the frame variable. 
        GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF);

#if defined(DEBUG)
        const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
        assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG

        // Insert call to CORINFO_HELP_JIT_PINVOKE_END
        GenTreeCall* helperCall =
            comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr));

        comp->fgMorphTree(helperCall);
        BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
        ContainCheckCallOperands(helperCall);
        return;
    }

    // gcstate = 1
    GenTree* insertionPoint = call->gtNext;

    GenTree* tree = SetGCState(1);
    BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
    ContainCheckStoreIndir(tree->AsStoreInd());

    tree = CreateReturnTrapSeq();
    BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
    ContainCheckReturnTrap(tree->AsOp());

    // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
    // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
    {
        tree = CreateFrameLinkUpdate(PopFrame);
        BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
        ContainCheckStoreIndir(tree->AsStoreInd());
    }
#else
    const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;

    // ----------------------------------------------------------------------------------
    // InlinedCallFrame.m_pCallerReturnAddress = nullptr

    GenTreeLclFld* const storeCallSiteTracker =
        new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
                                                   callFrameInfo.offsetOfReturnAddress);

    GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);

    storeCallSiteTracker->gtOp1 = constantZero;
    storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;

    BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
    ContainCheckStoreLoc(storeCallSiteTracker);
#endif // TARGET_64BIT
}

//------------------------------------------------------------------------
// LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
//
// Arguments:
//    call - The call to lower.
//
// Return Value:
//    The lowered call tree.
//
GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
{
    // PInvoke lowering varies depending on the flags passed in by the EE. By default,
    // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
    // GC transitions are instead performed using helper calls. Examples of each case are given
    // below. Note that the data structure that is used to store information about a call frame
    // containing any P/Invoke calls is initialized in the method prolog (see
    // InsertPInvokeMethod{Prolog,Epilog} for details).
    //
    // Inline transitions:
    //     InlinedCallFrame inlinedCallFrame;
    //
    //     ...
    //
    //     // Set up frame information
    //     inlinedCallFrame.callTarget = methodHandle;      // stored in m_Datum
    //     inlinedCallFrame.m_pCallSiteSP = SP;             // x86 only
    //     inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
    //     call)
    //     Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
    //
    //     // Switch the thread's GC mode to preemptive mode
    //     thread->m_fPreemptiveGCDisabled = 0;
    //
    //     // Call the unmanaged method
    //     target();
    //
    //     // Switch the thread's GC mode back to cooperative mode
    //     thread->m_fPreemptiveGCDisabled = 1;
    //
    //     // Rendezvous with a running collection if necessary
    //     if (g_TrapReturningThreads)
    //         RareDisablePreemptiveGC();
    //
    // Transitions using helpers:
    //
    //     OpaqueFrame opaqueFrame;
    //
    //     ...
    //
    //     // Call the JIT_PINVOKE_BEGIN helper
    //     JIT_PINVOKE_BEGIN(&opaqueFrame);
    //
    //     // Call the unmanaged method
    //     target();
    //
    //     // Call the JIT_PINVOKE_END helper
    //     JIT_PINVOKE_END(&opaqueFrame);
    //
    // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
    // platform. They may be changed in the future such that they preserve all register values.

    GenTree* result = nullptr;

    // All code generated by this function must not contain the randomly-inserted NOPs
    // that we insert to inhibit JIT spraying in partial trust scenarios.
    // The PINVOKE_PROLOG op signals this to the code generator/emitter.

    GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
    BlockRange().InsertBefore(call, prolog);

    bool addPInvokePrologEpilog = !call->IsSuppressGCTransition();
    if (addPInvokePrologEpilog)
    {
        InsertPInvokeCallProlog(call);
    }

    if (call->gtCallType != CT_INDIRECT)
    {
        noway_assert(call->gtCallType == CT_USER_FUNC);
        CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;

        CORINFO_CONST_LOOKUP lookup;
        comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);

        void*    addr = lookup.addr;
        GenTree* addrTree;
        switch (lookup.accessType)
        {
            case IAT_VALUE:
                // IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing
                // for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs
                // (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to
                // turn fAllowRel32 off globally.
                if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
                    !IsCallTargetInRange(addr))
                {
                    result = AddrGen(addr);
                }
                else
                {
                    // a direct call within range of hardware relative call instruction
                    // stash the address for codegen
                    call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN
                    call->gtEntryPoint.addr       = nullptr;
                    call->gtEntryPoint.accessType = IAT_VALUE;
#endif
                }
                break;

            case IAT_PVALUE:
                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                result = Ind(addrTree);
                break;

            case IAT_PPVALUE:
                // ToDo: Expanding an IAT_PPVALUE here loses the opportunity
                // to Hoist/CSE the first indirection as it is an invariant load
                //
                // This case currently occurs today when we make PInvoke calls in crossgen
                //
                // assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall");

                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                // Double-indirection.
Load the address into a register // and call indirectly through the register // result = Ind(Ind(addrTree)); break; case IAT_RELPVALUE: unreached(); } } if (addPInvokePrologEpilog) { InsertPInvokeCallEpilog(call); } return result; } // Expand the code necessary to calculate the control target. // Returns: the expression needed to calculate the control target // May insert embedded statements GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC); regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call); // get a reference to the thisPtr being passed fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0); assert(argEntry->GetRegNum() == thisPtrArgReg); assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG)); GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1(); // If what we are passing as the thisptr is not already a local, make a new local to place it in // because we will be creating expressions based on it. unsigned lclNum; if (thisPtr->OperIsLocal()) { lclNum = thisPtr->AsLclVarCommon()->GetLclNum(); } else { // Split off the thisPtr and store to a temporary variable. if (vtableCallTemp == BAD_VAR_NUM) { vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call")); } LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode()); ReplaceWithLclVar(thisPtrUse, vtableCallTemp); lclNum = vtableCallTemp; } // Get hold of the vtable offset (note: this might be expensive) unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // If the thisPtr is a local field, then construct a local field type node GenTree* local; if (thisPtr->isLclField()) { local = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs()); } else { local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum); } // pointer to virtual table = [REG_CALL_THIS + offs] GenTree* result = Ind(Offset(local, VPTR_OFFS)); // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. 
// Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // If relative pointers are also in second level indirection, additional temporary is used: // tmp1 = vtab // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection] // result = tmp2 + [tmp2] // unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp")); unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2")); GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result); GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet()); tmpTree = Offset(tmpTree, vtabOffsOfIndirection); tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false); GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs); GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1); GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base); LIR::Range range = LIR::SeqTree(comp, lclvNodeStore); JITDUMP("result of obtaining pointer to virtual table:\n"); DISPRANGE(range); BlockRange().InsertBefore(call, std::move(range)); LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2); ContainCheckIndir(tmpTree->AsIndir()); JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n"); DISPRANGE(range2); BlockRange().InsertAfter(lclvNodeStore, std::move(range2)); result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); } else { // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection] result = Ind(Offset(result, vtabOffsOfIndirection)); } } else { assert(!isRelative); } // Load the function address // result = [reg+vtabOffs] if (!isRelative) { result = Ind(Offset(result, vtabOffsAfterIndirection)); } return result; } // Lower stub dispatched virtual calls. GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) { assert(call->IsVirtualStub()); // An x86 JIT which uses full stub dispatch must generate only // the following stub dispatch calls: // // (1) isCallRelativeIndirect: // call dword ptr [rel32] ; FF 15 ---rel32---- // (2) isCallRelative: // call abc ; E8 ---rel32---- // (3) isCallRegisterIndirect: // 3-byte nop ; // call dword ptr [eax] ; FF 10 // // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect. GenTree* result = nullptr; // This is code to set up an indirect call to a stub address computed // via dictionary lookup. if (call->gtCallType == CT_INDIRECT) { // The importer decided we needed a stub call via a computed // stub dispatch address, i.e. an address which came from a dictionary lookup. // - The dictionary lookup produces an indirected address, suitable for call // via "call [VirtualStubParam.reg]" // // This combination will only be generated for shared generic code and when // stub dispatch is active. // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg. // All we have to do here is add an indirection to generate the actual call target. 
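        // That is, the call effectively becomes "call [VirtualStubParam.reg]"; the
        // GTF_IND_REQ_ADDR_IN_REG flag set below keeps the address load in the register.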
        GenTree* ind = Ind(call->gtCallAddr);
        BlockRange().InsertAfter(call->gtCallAddr, ind);
        call->gtCallAddr = ind;

        ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG;

        ContainCheckIndir(ind->AsIndir());
    }
    else
    {
        // Direct stub call.
        // Get stub addr. This will return NULL if virtual call stubs are not active
        void* stubAddr = call->gtStubCallStubAddr;
        noway_assert(stubAddr != nullptr);

        // If not CT_INDIRECT, then it should always be a relative indir call.
        // This is ensured by VM.
        noway_assert(call->IsVirtualStubRelativeIndir());

        // Direct stub calls, though the stubAddr itself may still need to be
        // accessed via an indirection.
        GenTree* addr = AddrGen(stubAddr);

        // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as
        // the target address, and we set a flag that it's a VSD call. The helper then
        // handles any necessary indirection.
        if (call->IsTailCallViaJitHelper())
        {
            result = addr;
        }
        else
        {
            bool shouldOptimizeVirtualStubCall = false;
#if defined(TARGET_ARMARCH) || defined(TARGET_AMD64)
            // Skip inserting the indirection node to load the address that is already
            // computed in the VSD stub arg register as a hidden parameter. Instead during the
            // codegen, just load the call target from there.
            shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled();
#endif

            if (!shouldOptimizeVirtualStubCall)
            {
                result = Ind(addr);
            }
        }
    }

    // TODO-Cleanup: start emitting random NOPS
    return result;
}

//------------------------------------------------------------------------
// Lowering::AreSourcesPossiblyModifiedLocals:
//    Given two nodes which will be used in an addressing mode (base,
//    index), check to see if they are lclVar reads, and if so, walk
//    backwards from the use until both reads have been visited to
//    determine if they are potentially modified in that range.
//
// Arguments:
//    addr - the node that uses the base and index nodes
//    base - the base node
//    index - the index node
//
// Returns: true if either the base or index may be modified between the
//          node and addr.
//
bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index)
{
    assert(addr != nullptr);

    SideEffectSet baseSideEffects;
    if (base != nullptr)
    {
        if (base->OperIsLocalRead())
        {
            baseSideEffects.AddNode(comp, base);
        }
        else
        {
            base = nullptr;
        }
    }

    SideEffectSet indexSideEffects;
    if (index != nullptr)
    {
        if (index->OperIsLocalRead())
        {
            indexSideEffects.AddNode(comp, index);
        }
        else
        {
            index = nullptr;
        }
    }

    for (GenTree* cursor = addr;; cursor = cursor->gtPrev)
    {
        assert(cursor != nullptr);

        if (cursor == base)
        {
            base = nullptr;
        }

        if (cursor == index)
        {
            index = nullptr;
        }

        if ((base == nullptr) && (index == nullptr))
        {
            return false;
        }

        m_scratchSideEffects.Clear();
        m_scratchSideEffects.AddNode(comp, cursor);
        if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false))
        {
            return true;
        }

        if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false))
        {
            return true;
        }
    }
}

//------------------------------------------------------------------------
// TryCreateAddrMode: recognize trees which can be implemented using an
//    addressing mode and transform them to a GT_LEA
//
// Arguments:
//    addr - the use of the address we want to transform
//    isContainable - true if this addressing mode can be contained
//    parent - the node that consumes the given addr (most likely it's an IND)
//
// Returns:
//    true if the address node was changed to a LEA, false otherwise.
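//
// Notes:
//    A typical fold, sketched (exact shapes depend on genCreateAddrMode):
//    ADD(lclA, ADD(LSH(lclI, 3), 16)) becomes LEA(lclA + lclI * 8 + 16),
//    with the ADD/LSH nodes that fed the address computation removed as unused.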
// bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent) { if (!addr->OperIs(GT_ADD) || addr->gtOverflow()) { return false; } #ifdef TARGET_ARM64 if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr)) { // For Arm64 we avoid using LEA for volatile INDs // because we won't be able to use ldar/stlr return false; } #endif GenTree* base = nullptr; GenTree* index = nullptr; unsigned scale = 0; ssize_t offset = 0; bool rev = false; // Find out if an addressing mode can be constructed bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address true, // fold &rev, // reverse ops &base, // base addr &index, // index val &scale, // scaling &offset); // displacement var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF; #ifdef TARGET_ARMARCH // Multiplier should be a "natural-scale" power of two number which is equal to target's width. // // *(ulong*)(data + index * 8); - can be optimized // *(ulong*)(data + index * 7); - can not be optimized // *(int*)(data + index * 2); - can not be optimized // if ((scale > 0) && (genTypeSize(targetType) != scale)) { return false; } #endif if (scale == 0) { scale = 1; } if (!isContainable) { // this is just a reg-const add if (index == nullptr) { return false; } // this is just a reg-reg add if ((scale == 1) && (offset == 0)) { return false; } } // make sure there are not any side effects between def of leaves and use if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index)) { JITDUMP("No addressing mode:\n "); DISPNODE(addr); return false; } JITDUMP("Addressing mode:\n"); JITDUMP(" Base\n "); DISPNODE(base); if (index != nullptr) { JITDUMP(" + Index * %u + %d\n ", scale, offset); DISPNODE(index); } else { JITDUMP(" + %d\n", offset); } // Save the (potentially) unused operands before changing the address to LEA. ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack)); unusedStack.Push(addr->AsOp()->gtGetOp1()); unusedStack.Push(addr->AsOp()->gtGetOp2()); addr->ChangeOper(GT_LEA); // Make sure there are no leftover side effects (though the existing ADD we're // changing shouldn't have any at this point, but sometimes it does). addr->gtFlags &= ~GTF_ALL_EFFECT; GenTreeAddrMode* addrMode = addr->AsAddrMode(); addrMode->SetBase(base); addrMode->SetIndex(index); addrMode->SetScale(scale); addrMode->SetOffset(static_cast<int>(offset)); // Neither the base nor the index should now be contained. if (base != nullptr) { base->ClearContained(); } if (index != nullptr) { index->ClearContained(); } // Remove all the nodes that are no longer used. while (!unusedStack.Empty()) { GenTree* unused = unusedStack.Pop(); // Use a loop to process some of the nodes iteratively // instead of pushing them on the stack. while ((unused != base) && (unused != index)) { JITDUMP("Removing unused node:\n "); DISPNODE(unused); BlockRange().Remove(unused); if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH)) { // Push the first operand and loop back to process the second one. // This minimizes the stack depth because the second one tends to be // a constant so it gets processed and then the first one gets popped. unusedStack.Push(unused->AsOp()->gtGetOp1()); unused = unused->AsOp()->gtGetOp2(); } else { assert(unused->OperIs(GT_CNS_INT)); break; } } } #ifdef TARGET_ARM64 // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store. 
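// Illustrative example (schematic; 'i32' is a hypothetical 32-bit index value): for an
// 8-byte load of the shape
//     IND<long>(LEA(base, BFIZ(CAST<long>(i32), 3)))
// containing the BFIZ lets codegen emit a single scaled sign-extending load such as
//     ldr x0, [x1, w2, SXTW #3]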
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) && index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType))) { // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT GenTreeCast* cast = index->gtGetOp1()->AsCast(); assert(cast->isContained()); const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue(); // 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form // where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width. if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) && (scale == 1) && (offset == 0)) { // TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ. MakeSrcContained(addrMode, index); } } #endif JITDUMP("New addressing mode node:\n "); DISPNODE(addrMode); JITDUMP("\n"); return true; } //------------------------------------------------------------------------ // LowerAdd: turn this add into a GT_LEA if that would be profitable // // Arguments: // node - the node we care about // // Returns: // nullptr if no transformation was done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerAdd(GenTreeOp* node) { if (varTypeIsIntegralOrI(node->TypeGet())) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); LIR::Use use; // It is not the best place to do such simple arithmetic optimizations, // but it allows us to avoid `LEA(addr, 0)` nodes and doing that in morph // requires more changes. Delete that part if we get an expression optimizer. if (op2->IsIntegralConst(0)) { JITDUMP("Lower: optimize val + 0: "); DISPNODE(node); JITDUMP("Replaced with: "); DISPNODE(op1); if (BlockRange().TryGetUse(node, &use)) { use.ReplaceWith(op1); } else { op1->SetUnusedValue(); } GenTree* next = node->gtNext; BlockRange().Remove(op2); BlockRange().Remove(node); JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID); return next; } #ifndef TARGET_ARMARCH if (BlockRange().TryGetUse(node, &use)) { // If this is a child of an indir, let the parent handle it. // If there is a chain of adds, only look at the topmost one. GenTree* parent = use.User(); if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD)) { TryCreateAddrMode(node, false, parent); } } #endif // !TARGET_ARMARCH } if (node->OperIs(GT_ADD)) { ContainCheckBinary(node); } return nullptr; } //------------------------------------------------------------------------ // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node. // // Arguments: // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered // // Return Value: // Returns a boolean indicating whether the node was transformed. 
// // Notes: // - Transform UDIV/UMOD by power of 2 into RSZ/AND // - Transform UDIV by constant >= 2^(N-1) into GE // - Transform UDIV/UMOD by constant >= 3 into "magic division" // bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) { assert(divMod->OperIs(GT_UDIV, GT_UMOD)); #if defined(USE_HELPERS_FOR_INT_DIV) if (!varTypeIsIntegral(divMod->TypeGet())) { assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls"); } assert(varTypeIsFloating(divMod->TypeGet())); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(divMod->OperGet() != GT_UMOD); #endif // TARGET_ARM64 GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); #if !defined(TARGET_64BIT) if (dividend->OperIs(GT_LONG)) { return false; } #endif if (!divisor->IsCnsIntOrI()) { return false; } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return false; } const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_I_IMPL)); size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue()); if (type == TYP_INT) { // Clear up the upper 32 bits of the value, they may be set to 1 because constants // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets. divisorValue &= UINT32_MAX; } if (divisorValue == 0) { return false; } const bool isDiv = divMod->OperIs(GT_UDIV); if (isPow2(divisorValue)) { genTreeOps newOper; if (isDiv) { newOper = GT_RSZ; divisorValue = genLog2(divisorValue); } else { newOper = GT_AND; divisorValue -= 1; } divMod->SetOper(newOper); divisor->AsIntCon()->SetIconValue(divisorValue); ContainCheckNode(divMod); return true; } if (isDiv) { // If the divisor is greater than or equal to 2^(N - 1) then the result is 1 // iff the dividend is greater than or equal to the divisor. if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) || ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2)))) { divMod->SetOper(GT_GE); divMod->gtFlags |= GTF_UNSIGNED; ContainCheckNode(divMod); return true; } } // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) if (!comp->opts.MinOpts() && (divisorValue >= 3)) { size_t magic; bool increment; int preShift; int postShift; bool simpleMul = false; unsigned bits = type == TYP_INT ? 
32 : 64; // if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI()) { size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue()); if (maskCns != 0) { unsigned maskBits = 1; while (maskCns >>= 1) maskBits++; if (maskBits < bits) bits = maskBits; } } else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI()) { size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue()); if (shiftCns < bits) { bits -= static_cast<unsigned>(shiftCns); } } if (type == TYP_INT) { magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift, &postShift, bits); #ifdef TARGET_64BIT // avoid inc_saturate/multiple shifts by widening to 32x64 MULHI if (increment || (preShift #ifdef TARGET_XARCH // IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension && static_cast<int32_t>(magic) < 0 #endif )) { magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift, &postShift, bits); } // otherwise just widen to regular multiplication else { postShift += 32; simpleMul = true; } #endif } else { #ifdef TARGET_64BIT magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift, &postShift, bits); #else unreached(); #endif } assert(divMod->MarkedDivideByConstOptimized()); const bool requiresDividendMultiuse = !isDiv; const weight_t curBBWeight = m_block->getBBWeight(comp); if (requiresDividendMultiuse) { LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod); dividend = ReplaceWithLclVar(dividendUse); } GenTree* firstNode = nullptr; GenTree* adjustedDividend = dividend; #ifdef TARGET_ARM64 // On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one. bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul; #else CLANG_FORMAT_COMMENT_ANCHOR; bool widenToNativeIntForMul = (type != TYP_I_IMPL); #endif // If "increment" flag is returned by GetUnsignedMagic we need to do Saturating Increment first if (increment) { adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend); BlockRange().InsertBefore(divMod, adjustedDividend); firstNode = adjustedDividend; assert(!preShift); } // if "preShift" is required, then do a right shift before else if (preShift) { GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT); adjustedDividend = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy); BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend); firstNode = preShiftBy; } else if (widenToNativeIntForMul) { adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL); BlockRange().InsertBefore(divMod, adjustedDividend); firstNode = adjustedDividend; } #ifdef TARGET_XARCH // force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes // redundant copies otherwise if (firstNode && !simpleMul) { adjustedDividend->SetRegNum(REG_RAX); } #endif if (widenToNativeIntForMul) { divisor->gtType = TYP_I_IMPL; } divisor->AsIntCon()->SetIconValue(magic); if (isDiv && !postShift && (type == TYP_I_IMPL)) { divMod->SetOper(GT_MULHI); divMod->gtOp1 = adjustedDividend; divMod->SetUnsigned(); } else { #ifdef TARGET_ARM64 // 64-bit MUL is more expensive than UMULL on ARM64. genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI; #else // 64-bit IMUL is less expensive than MUL eax:edx on x64. 
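// Illustrative example (magic values as produced by MagicDivide, quoted here for reference):
// a 32-bit unsigned division by 3 uses magic == 0xAAAAAAAB with a total shift of 33 after
// widening to a 64-bit multiply, i.e. in C terms:
//     q = (uint32_t)(((uint64_t)n * 0xAAAAAAABULL) >> 33); // == n / 3 for any uint32_t n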
genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI; #endif // Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node. // The existing node will later be transformed into a GT_RSZ/GT_SUB that // computes the final result. This way we don't need to find and change the use // of the existing node. GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor); mulhi->SetUnsigned(); BlockRange().InsertBefore(divMod, mulhi); if (firstNode == nullptr) { firstNode = mulhi; } if (postShift) { GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT); BlockRange().InsertBefore(divMod, shiftBy); if (isDiv && (type == TYP_I_IMPL)) { divMod->SetOper(GT_RSZ); divMod->gtOp1 = mulhi; divMod->gtOp2 = shiftBy; } else { mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy); BlockRange().InsertBefore(divMod, mulhi); } } if (!isDiv) { // dividend UMOD divisor = dividend SUB (div MUL divisor) GenTree* divisor = comp->gtNewIconNode(divisorValue, type); GenTree* mul = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor); dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); divMod->SetOper(GT_SUB); divMod->gtOp1 = dividend; divMod->gtOp2 = mul; BlockRange().InsertBefore(divMod, divisor, mul, dividend); } else if (type != TYP_I_IMPL) { #ifdef TARGET_ARMARCH divMod->SetOper(GT_CAST); divMod->SetUnsigned(); divMod->AsCast()->gtCastType = TYP_INT; #else divMod->SetOper(GT_BITCAST); #endif divMod->gtOp1 = mulhi; divMod->gtOp2 = nullptr; } } if (firstNode != nullptr) { ContainCheckRange(firstNode, divMod); } return true; } #endif return false; } // LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2 // const divisor into equivalent but faster sequences. // // Arguments: // node - pointer to the DIV or MOD node // // Returns: // nullptr if no transformation is done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) { assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD)); GenTree* divMod = node; GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_LONG)); #if defined(USE_HELPERS_FOR_INT_DIV) assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls"); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(node->OperGet() != GT_MOD); #endif // TARGET_ARM64 if (!divisor->IsCnsIntOrI()) { return nullptr; // no transformations to make } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return nullptr; } ssize_t divisorValue = divisor->AsIntCon()->IconValue(); if (divisorValue == -1 || divisorValue == 0) { // x / 0 and x % 0 can't be optimized because they are required to throw an exception. // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception. // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this // case so optimizing this case would break C# code. // A runtime check could be used to handle this case but it's probably too rare to matter. 
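// For example, INT32_MIN / -1 would be +2147483648, which does not fit in TYP_INT.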
return nullptr; } bool isDiv = divMod->OperGet() == GT_DIV; if (isDiv) { if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN)) { // If the divisor is the minimum representable integer value then we can use a compare, // the result is 1 iff the dividend equals divisor. divMod->SetOper(GT_EQ); return node; } } size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { if (comp->opts.MinOpts()) { return nullptr; } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) ssize_t magic; int shift; if (type == TYP_INT) { magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift); } else { #ifdef TARGET_64BIT magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift); #else // !TARGET_64BIT unreached(); #endif // !TARGET_64BIT } divisor->AsIntConCommon()->SetIconValue(magic); // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node. // The existing node will later be transformed into a GT_ADD/GT_SUB that // computes the final result. This way we don't need to find and change the // use of the existing node. GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend); BlockRange().InsertBefore(divMod, mulhi); // mulhi was the easy part. Now we need to generate different code depending // on the divisor value: // For 3 we need: // div = signbit(mulhi) + mulhi // For 5 we need: // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust // For 7 we need: // mulhi += dividend ; requires add adjust // div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust // For -3 we need: // mulhi -= dividend ; requires sub adjust // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust bool requiresAddSubAdjust = signum(divisorValue) != signum(magic); bool requiresShiftAdjust = shift != 0; bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv; if (requiresDividendMultiuse) { LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi); dividend = ReplaceWithLclVar(dividendUse); } GenTree* adjusted; if (requiresAddSubAdjust) { dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); adjusted = comp->gtNewOperNode(divisorValue > 0 ? 
GT_ADD : GT_SUB, type, mulhi, dividend); BlockRange().InsertBefore(divMod, dividend, adjusted); } else { adjusted = mulhi; } GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type); GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy); BlockRange().InsertBefore(divMod, shiftBy, signBit); LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit); adjusted = ReplaceWithLclVar(adjustedUse); adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet()); BlockRange().InsertBefore(divMod, adjusted); if (requiresShiftAdjust) { shiftBy = comp->gtNewIconNode(shift, TYP_INT); adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy); BlockRange().InsertBefore(divMod, shiftBy, adjusted); } if (isDiv) { divMod->SetOperRaw(GT_ADD); divMod->AsOp()->gtOp1 = adjusted; divMod->AsOp()->gtOp2 = signBit; } else { GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit); dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); // dividend % divisor = dividend - divisor x div GenTree* divisor = comp->gtNewIconNode(divisorValue, type); GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor); BlockRange().InsertBefore(divMod, dividend, div, divisor, mul); divMod->SetOperRaw(GT_SUB); divMod->AsOp()->gtOp1 = dividend; divMod->AsOp()->gtOp2 = mul; } return mulhi; #elif defined(TARGET_ARM) // Currently there's no GT_MULHI for ARM32 return nullptr; #else #error Unsupported or unset target architecture #endif } // We're committed to the conversion now. Go find the use if any. LIR::Use use; if (!BlockRange().TryGetUse(node, &use)) { return nullptr; } // We need to use the dividend node multiple times so its value needs to be // computed once and stored in a temp variable. LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod); dividend = ReplaceWithLclVar(opDividend); GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63)); if (absDivisorValue == 2) { // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1. // We can get the same result by using GT_RSZ instead of GT_RSH. 
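// Illustrative example (hypothetical values): for divisor == 2 and dividend == -5 (TYP_INT),
// GT_RSZ yields adjustment == 1, the adjusted dividend is -5 + 1 == -4, and the final
// arithmetic shift gives -4 >> 1 == -2, matching the truncated division -5 / 2.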
adjustment->SetOper(GT_RSZ); } else { adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type)); } GenTree* adjustedDividend = comp->gtNewOperNode(GT_ADD, type, adjustment, comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet())); GenTree* newDivMod; if (isDiv) { // perform the division by right shifting the adjusted dividend divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue)); newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor); ContainCheckShiftRotate(newDivMod->AsOp()); if (divisorValue < 0) { // negate the result if the divisor is negative newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod); ContainCheckNode(newDivMod); } } else { // dividend % divisor = dividend - divisor x (dividend / divisor) // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor) // which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1) divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1)); newDivMod = comp->gtNewOperNode(GT_SUB, type, comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()), comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor)); } // Remove the divisor and dividend nodes from the linear order, // since we have reused them and will resequence the tree BlockRange().Remove(divisor); BlockRange().Remove(dividend); // linearize and insert the new tree before the original divMod node InsertTreeBeforeAndContainCheck(divMod, newDivMod); BlockRange().Remove(divMod); // replace the original divmod node with the new divmod tree use.ReplaceWith(newDivMod); return newDivMod->gtNext; } //------------------------------------------------------------------------ // LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2 // const divisor into equivalent but faster sequences. // // Arguments: // node - the DIV or MOD node // // Returns: // The next node to lower. // GenTree* Lowering::LowerSignedDivOrMod(GenTree* node) { assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD)); GenTree* next = node->gtNext; if (varTypeIsIntegral(node->TypeGet())) { // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node. GenTree* newNode = LowerConstIntDivOrMod(node); if (newNode != nullptr) { return newNode; } } ContainCheckDivOrMod(node->AsOp()); return next; } //------------------------------------------------------------------------ // LowerShift: Lower shift nodes // // Arguments: // shift - the shift node (GT_LSH, GT_RSH or GT_RSZ) // // Notes: // Remove unnecessary shift count masking, xarch shift instructions // mask the shift count to 5 bits (or 6 bits for 64 bit operations). void Lowering::LowerShift(GenTreeOp* shift) { assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ)); size_t mask = 0x1f; #ifdef TARGET_64BIT if (varTypeIsLong(shift->TypeGet())) { mask = 0x3f; } #else assert(!varTypeIsLong(shift->TypeGet())); #endif for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1()) { GenTree* maskOp = andOp->gtGetOp2(); if (!maskOp->IsCnsIntOrI()) { break; } if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask) { break; } shift->gtOp2 = andOp->gtGetOp1(); BlockRange().Remove(andOp); BlockRange().Remove(maskOp); // The parent was replaced, clear contain and regOpt flag. 
shift->gtOp2->ClearContained(); } ContainCheckShiftRotate(shift); #ifdef TARGET_ARM64 // Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) && shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained()) { GenTreeIntCon* cns = shift->gtGetOp2()->AsIntCon(); GenTreeCast* cast = shift->gtGetOp1()->AsCast(); if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() && // Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load cast->CastOp()->TypeIs(TYP_LONG, TYP_INT)) { // Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned) unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE; unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE : genTypeSize(cast->CastOp()) * BITS_PER_BYTE; assert(!cast->CastOp()->isContained()); // It has to be an upcast and CNS must be in [1..srcBits) range if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits)) { JITDUMP("Recognized ubfiz/sbfiz pattern in LSH(CAST, CNS). Changing op to GT_BFIZ"); shift->ChangeOper(GT_BFIZ); MakeSrcContained(shift, cast); } } } #endif } void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node) { #ifdef FEATURE_SIMD if (node->TypeGet() == TYP_SIMD12) { // Assumption 1: // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for // reading and writing purposes. // // Assumption 2: // RyuJit backend makes another implicit assumption: when Vector3 type args are passed in // registers or on stack, the uppermost 4 bytes will be zero. // // For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee // that the upper 4 bytes of a Vector3 type struct are zero initialized and hence assumption 2 is // invalid. // // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12 // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and // passes it as the retBuf arg, and the Callee method writes only 12 bytes to retBuf. For this reason, // there is no need to clear upper 4-bytes of Vector3 type args. // // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16. // Vector3 return values are returned in two return registers and the Caller assembles them into a // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3 // type args in the prolog and of the Vector3 type return value of a call. // // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear // it either. LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon()); if (comp->lvaMapSimd12ToSimd16(varDsc)) { JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n"); DISPNODE(node); JITDUMP("============"); node->gtType = TYP_SIMD16; } } #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // LowerArrElem: Lower a GT_ARR_ELEM node // // Arguments: // node - the GT_ARR_ELEM node to lower. // // Return Value: // The next node to lower. 
// // Assumptions: // pTree points to a pointer to a GT_ARR_ELEM node. // // Notes: // This performs the following lowering. We start with a node of the form: // /--* <arrObj> // +--* <index0> // +--* <index1> // /--* arrMD&[,] // // First, we create temps for arrObj if it is not already a lclVar, and for any of the index // expressions that have side-effects. // We then transform the tree into: // <offset is null - no accumulated offset for the first index> // /--* <arrObj> // +--* <index0> // /--* ArrIndex[i, ] // +--* <arrObj> // /--| arrOffs[i, ] // | +--* <arrObj> // | +--* <index1> // +--* ArrIndex[*,j] // +--* <arrObj> // /--| arrOffs[*,j] // +--* lclVar NewTemp // /--* lea (scale = element size, offset = offset of first element) // // The new stmtExpr may be omitted if the <arrObj> is a lclVar. // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for // the statement containing the original arrMD. // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second // reference to NewTemp), because that provides more accurate lifetimes. // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively. // GenTree* Lowering::LowerArrElem(GenTree* node) { // This will assert if we don't have an ArrElem node GenTreeArrElem* arrElem = node->AsArrElem(); const unsigned char rank = arrElem->gtArrRank; JITDUMP("Lowering ArrElem\n"); JITDUMP("============\n"); DISPTREERANGE(BlockRange(), arrElem); JITDUMP("\n"); assert(arrElem->gtArrObj->TypeGet() == TYP_REF); // We need to have the array object in a lclVar. if (!arrElem->gtArrObj->IsLocal()) { LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem); ReplaceWithLclVar(arrObjUse); } GenTree* arrObjNode = arrElem->gtArrObj; assert(arrObjNode->IsLocal()); GenTree* insertionPoint = arrElem; // The first ArrOffs node will have 0 for the offset of the previous dimension. GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); BlockRange().InsertBefore(insertionPoint, prevArrOffs); GenTree* nextToLower = prevArrOffs; for (unsigned char dim = 0; dim < rank; dim++) { GenTree* indexNode = arrElem->gtArrInds[dim]; // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones. GenTree* idxArrObjNode; if (dim == 0) { idxArrObjNode = arrObjNode; } else { idxArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, idxArrObjNode); } // Next comes the GT_ARR_INDEX node. GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX) GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType); arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrMDIdx); GenTree* offsArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, offsArrObjNode); GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType); arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrOffs); prevArrOffs = arrOffs; } // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the // base. 
unsigned scale = arrElem->gtArrElemSize; unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank); GenTree* leaIndexNode = prevArrOffs; if (!jitIsScaleIndexMul(scale)) { // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are // TYP_INT GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale); GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode); BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode); leaIndexNode = mulNode; scale = 1; } GenTree* leaBase = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, leaBase); GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset); BlockRange().InsertBefore(insertionPoint, leaNode); LIR::Use arrElemUse; if (BlockRange().TryGetUse(arrElem, &arrElemUse)) { arrElemUse.ReplaceWith(leaNode); } else { leaNode->SetUnusedValue(); } BlockRange().Remove(arrElem); JITDUMP("Results of lowering ArrElem:\n"); DISPTREERANGE(BlockRange(), leaNode); JITDUMP("\n\n"); return nextToLower; } PhaseStatus Lowering::DoPhase() { // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodProlog(); } #if !defined(TARGET_64BIT) DecomposeLongs decomp(comp); // Initialize the long decomposition class. if (comp->compLongUsed) { decomp.PrepareForDecomposition(); } #endif // !defined(TARGET_64BIT) if (!comp->compEnregLocals()) { // Lowering is checking if lvDoNotEnregister is already set for contained optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // `lvDoNotEnregister` flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. comp->lvSetMinOptsDoNotEnreg(); } for (BasicBlock* const block : comp->Blocks()) { /* Make the block publicly available */ comp->compCurBB = block; #if !defined(TARGET_64BIT) if (comp->compLongUsed) { decomp.DecomposeBlock(block); } #endif //! TARGET_64BIT LowerBlock(block); } #ifdef DEBUG JITDUMP("Lower has completed modifying nodes.\n"); if (VERBOSE) { comp->fgDispBasicBlocks(true); } #endif // Recompute local var ref counts before potentially sorting for liveness. // Note this does minimal work in cases where we are not going to sort. const bool isRecompute = true; const bool setSlotNumbers = false; comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); comp->fgLocalVarLiveness(); // local var liveness can delete code, which may create empty blocks if (comp->opts.OptimizationEnabled()) { comp->optLoopsMarked = false; bool modified = comp->fgUpdateFlowGraph(); if (modified) { JITDUMP("had to run another liveness pass:\n"); comp->fgLocalVarLiveness(); } } // Recompute local var ref counts again after liveness to reflect // impact of any dead code removal. Note this may leave us with // tracked vars that have zero refs. 
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef DEBUG //------------------------------------------------------------------------ // Lowering::CheckCallArg: check that a call argument is in an expected // form after lowering. // // Arguments: // arg - the argument to check. // void Lowering::CheckCallArg(GenTree* arg) { if (!arg->IsValue() && !arg->OperIsPutArgStk()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } switch (arg->OperGet()) { case GT_FIELD_LIST: { GenTreeFieldList* list = arg->AsFieldList(); assert(list->isContained()); for (GenTreeFieldList::Use& use : list->Uses()) { assert(use.GetNode()->OperIsPutArg()); } } break; default: assert(arg->OperIsPutArg()); break; } } //------------------------------------------------------------------------ // Lowering::CheckCall: check that a call is in an expected form after // lowering. Currently this amounts to checking its // arguments, but could be expanded to verify more // properties in the future. // // Arguments: // call - the call to check. // void Lowering::CheckCall(GenTreeCall* call) { if (call->gtCallThisArg != nullptr) { CheckCallArg(call->gtCallThisArg->GetNode()); } for (GenTreeCall::Use& use : call->Args()) { CheckCallArg(use.GetNode()); } for (GenTreeCall::Use& use : call->LateArgs()) { CheckCallArg(use.GetNode()); } } //------------------------------------------------------------------------ // Lowering::CheckNode: check that an LIR node is in an expected form // after lowering. // // Arguments: // compiler - the compiler context. // node - the node to check. // void Lowering::CheckNode(Compiler* compiler, GenTree* node) { switch (node->OperGet()) { case GT_CALL: CheckCall(node->AsCall()); break; #ifdef FEATURE_SIMD case GT_SIMD: case GT_HWINTRINSIC: assert(node->TypeGet() != TYP_SIMD12); break; #endif // FEATURE_SIMD case GT_LCL_VAR: case GT_STORE_LCL_VAR: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar()); #if defined(FEATURE_SIMD) && defined(TARGET_64BIT) if (node->TypeIs(TYP_SIMD12)) { assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12)); } #endif // FEATURE_SIMD && TARGET_64BIT if (varDsc->lvPromoted) { assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet); } } break; case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr); if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr()) { // Emitter does not correctly handle live updates for LCL_VAR_ADDR // when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)` // would generate: // add r1, sp, 48 // r1 contains address of a lclVar V01. // str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it. // Make sure that we use uncontained address nodes only for variables // that will be marked as mustInit and will be alive throughout the whole block even when tracked. assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc)); // TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900. 
} assert(varDsc->lvDoNotEnregister); break; } case GT_PHI: case GT_PHI_ARG: assert(!"Should not see phi nodes after rationalize"); break; case GT_LCL_FLD: case GT_STORE_LCL_FLD: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld()); assert(varDsc->lvDoNotEnregister); } break; default: break; } } //------------------------------------------------------------------------ // Lowering::CheckBlock: check that the contents of an LIR block are in an // expected form after lowering. // // Arguments: // compiler - the compiler context. // block - the block to check. // bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block) { assert(block->isEmpty() || block->IsLIR()); LIR::Range& blockRange = LIR::AsRange(block); for (GenTree* node : blockRange) { CheckNode(compiler, node); } assert(blockRange.CheckLIR(compiler, true)); return true; } #endif //------------------------------------------------------------------------ // Lowering::LowerBlock: Lower all the nodes in a BasicBlock // // Arguments: // block - the block to lower. // void Lowering::LowerBlock(BasicBlock* block) { assert(block == comp->compCurBB); // compCurBB must already be set. assert(block->isEmpty() || block->IsLIR()); m_block = block; // NOTE: some of the lowering methods insert calls before the node being // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In // general, any code that is inserted before the current node should be // "pre-lowered" as they won't be subject to further processing. // Lowering::CheckBlock() runs some extra checks on call arguments in // order to help catch unlowered nodes. GenTree* node = BlockRange().FirstNode(); while (node != nullptr) { node = LowerNode(node); } assert(CheckBlock(comp, block)); } /** Verifies if both of these trees represent the same indirection. * Used by Lower to annotate if CodeGen can generate an instruction of the * form *addrMode BinOp= expr * * Preconditions: both trees are children of GT_INDs and their underlying children * have the same gtOper. * * This is a first iteration to actually recognize trees that can be code-generated * as a single read-modify-write instruction on AMD64/x86. For now * this method only supports the recognition of simple addressing modes (through GT_LEA) * or local var indirections. Local fields, array access and other more complex nodes are * not yet supported. * * TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize * arbitrarily complex trees and support many more addressing patterns. */ bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd) { assert(candidate->OperGet() == GT_IND); assert(storeInd->OperGet() == GT_STOREIND); // We should check the size of the indirections. If they are // different, say because of a cast, then we can't call them equivalent. Doing so could cause us // to drop a cast. // Signed-ness difference is okay and expected since a store indirection must always // be signed based on the CIL spec, but a load could be unsigned. if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType)) { return false; } GenTree* pTreeA = candidate->gtGetOp1(); GenTree* pTreeB = storeInd->gtGetOp1(); // This method will be called by codegen (as well as during lowering). // After register allocation, the sources may have been spilled and reloaded // to a different register, indicated by an inserted GT_RELOAD node. 
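// Illustrative example of the read-modify-write shape this equivalence check enables
// (schematic): STOREIND(addr, ADD(IND(addr), CNS_INT 1)) can be emitted on xarch as
//   add dword ptr [addr], 1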
pTreeA = pTreeA->gtSkipReloadOrCopy(); pTreeB = pTreeB->gtSkipReloadOrCopy(); genTreeOps oper; if (pTreeA->OperGet() != pTreeB->OperGet()) { return false; } oper = pTreeA->OperGet(); switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_CLS_VAR_ADDR: case GT_CNS_INT: return NodesAreEquivalentLeaves(pTreeA, pTreeB); case GT_LEA: { GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode(); GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode(); return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) && NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) && (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset()); } default: // We don't handle anything that is not either a constant, // a local var or LEA. return false; } } //------------------------------------------------------------------------ // NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves. // // Arguments: // tree1 and tree2 are nodes to be checked. // Return Value: // Returns true if they are same leaves, false otherwise. // // static bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2) { if (tree1 == tree2) { return true; } if (tree1 == nullptr || tree2 == nullptr) { return false; } tree1 = tree1->gtSkipReloadOrCopy(); tree2 = tree2->gtSkipReloadOrCopy(); if (tree1->TypeGet() != tree2->TypeGet()) { return false; } if (tree1->OperGet() != tree2->OperGet()) { return false; } if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf()) { return false; } switch (tree1->OperGet()) { case GT_CNS_INT: return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() && tree1->IsIconHandle() == tree2->IsIconHandle(); case GT_LCL_VAR: case GT_LCL_VAR_ADDR: return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum(); case GT_CLS_VAR_ADDR: return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd; default: return false; } } //------------------------------------------------------------------------ // Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can // remain a multi-reg. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node. // retTypeDesc - a return type descriptor either for a call source of a store of // the local, or for the GT_RETURN consumer of the local. // // Notes: // If retTypeDesc is non-null, this method will check that the fields are compatible. // Otherwise, it will only check that the lclVar is independently promoted // (i.e. it is marked lvPromoted and not lvDoNotEnregister). // bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc) { bool canEnregister = false; #if FEATURE_MULTIREG_RET LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum()); if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted) { // We can enregister if we have a promoted struct and all the fields' types match the ABI requirements. // Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and // if we have multiple types packed into a single register, we won't have matching reg and field counts, // so we can tolerate mismatches of integer size. if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // If we have no retTypeDesc, we only care that it is independently promoted. 
if (retTypeDesc == nullptr) { canEnregister = true; } else { unsigned regCount = retTypeDesc->GetReturnRegCount(); if (regCount == varDsc->lvFieldCnt) { canEnregister = true; } } } } #ifdef TARGET_XARCH // For local stores on XARCH we only handle mismatched src/dest register count for // calls of SIMD type. If the source was another lclVar similarly promoted, we would // have broken it into multiple stores. if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL)) { canEnregister = false; } #endif // TARGET_XARCH if (canEnregister) { lclNode->SetMultiReg(); } else { lclNode->ClearMultiReg(); if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } #endif return canEnregister; } //------------------------------------------------------------------------ // Containment Analysis //------------------------------------------------------------------------ void Lowering::ContainCheckNode(GenTree* node) { switch (node->gtOper) { case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: ContainCheckStoreLoc(node->AsLclVarCommon()); break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: case GT_JCMP: ContainCheckCompare(node->AsOp()); break; case GT_JTRUE: ContainCheckJTrue(node->AsOp()); break; case GT_ADD: case GT_SUB: #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_AND: case GT_OR: case GT_XOR: ContainCheckBinary(node->AsOp()); break; #if defined(TARGET_X86) case GT_MUL_LONG: #endif case GT_MUL: case GT_MULHI: ContainCheckMul(node->AsOp()); break; case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: ContainCheckDivOrMod(node->AsOp()); break; case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: #endif ContainCheckShiftRotate(node->AsOp()); break; case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; case GT_RETURN: ContainCheckRet(node->AsOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_STOREIND: ContainCheckStoreIndir(node->AsStoreInd()); break; case GT_IND: ContainCheckIndir(node->AsIndir()); break; case GT_PUTARG_REG: case GT_PUTARG_STK: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT // The regNum must have been set by the lowering of the call. assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: ContainCheckSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: ContainCheckHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: break; } } //------------------------------------------------------------------------ // ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained. 
// // Arguments: // node - pointer to the GT_RETURNTRAP node // void Lowering::ContainCheckReturnTrap(GenTreeOp* node) { #ifdef TARGET_XARCH assert(node->OperIs(GT_RETURNTRAP)); // This just turns into a compare of its child with an int + a conditional call if (node->gtOp1->isIndir()) { MakeSrcContained(node, node->gtOp1); } #endif // TARGET_XARCH } //------------------------------------------------------------------------ // ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained. // // Arguments: // node - pointer to the GT_ARR_OFFSET node // void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node) { assert(node->OperIs(GT_ARR_OFFSET)); // we don't want to generate code for this if (node->gtOffset->IsIntegralConst(0)) { MakeSrcContained(node, node->AsArrOffs()->gtOffset); } } //------------------------------------------------------------------------ // ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckLclHeap(GenTreeOp* node) { assert(node->OperIs(GT_LCLHEAP)); GenTree* size = node->AsOp()->gtOp1; if (size->IsCnsIntOrI()) { MakeSrcContained(node, size); } } //------------------------------------------------------------------------ // ContainCheckRet: determine whether the source of a node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckRet(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); #if !defined(TARGET_64BIT) if (ret->TypeGet() == TYP_LONG) { GenTree* op1 = ret->gtGetOp1(); noway_assert(op1->OperGet() == GT_LONG); MakeSrcContained(ret, op1); } #endif // !defined(TARGET_64BIT) #if FEATURE_MULTIREG_RET if (ret->TypeIs(TYP_STRUCT)) { GenTree* op1 = ret->gtGetOp1(); // op1 must be either a lclvar or a multi-reg returning call if (op1->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon()); // This must be a multi-reg return or an HFA of a single element. assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType))); // Mark var as contained if not enregisterable. if (!varDsc->IsEnregisterableLcl()) { if (!op1->IsMultiRegLclVar()) { MakeSrcContained(ret, op1); } } } } #endif // FEATURE_MULTIREG_RET } //------------------------------------------------------------------------ // ContainCheckJTrue: determine whether the source of a JTRUE should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckJTrue(GenTreeOp* node) { // The compare does not need to be generated into a register. GenTree* cmp = node->gtGetOp1(); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; } //------------------------------------------------------------------------ // ContainCheckBitCast: determine whether the source of a BITCAST should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBitCast(GenTree* node) { GenTree* const op1 = node->AsOp()->gtOp1; if (op1->isMemoryOp()) { op1->SetContained(); } else if (op1->OperIs(GT_LCL_VAR)) { if (!m_lsra->willEnregisterLocalVars()) { op1->SetContained(); } const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar()); // TODO-Cleanup: we want to check if the local is already known not // to be on reg, for example, because local enreg is disabled. 
if (varDsc->lvDoNotEnregister) { op1->SetContained(); } else { op1->SetRegOptional(); } } else if (op1->IsLocal()) { op1->SetContained(); } } //------------------------------------------------------------------------ // LowerStoreIndirCommon: a common logic to lower StoreIndir. // // Arguments: // ind - the store indirection node we are lowering. // void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) { assert(ind->TypeGet() != TYP_STRUCT); #if defined(TARGET_ARM64) // Verify containment safety before creating an LEA that must be contained. // const bool isContainable = IsSafeToContainMem(ind, ind->Addr()); #else const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind)) { if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl()) { // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller. GenTree* data = ind->Data(); double dblCns = data->AsDblCon()->gtDconVal; ssize_t intCns = 0; var_types type = TYP_UNKNOWN; // XARCH: we can always contain the immediates. // ARM64: zero can always be contained, other cases will use immediates from the data // section and it is not a clear win to switch them to inline integers. // ARM: FP constants are assembled from integral ones, so it is always profitable // to directly use the integers as it avoids the int -> float conversion. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) || defined(TARGET_ARM) bool shouldSwitchToInteger = true; #else // TARGET_ARM64 bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl(); #endif if (shouldSwitchToInteger) { if (ind->TypeIs(TYP_FLOAT)) { float fltCns = static_cast<float>(dblCns); // should be a safe round-trip intCns = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns)); type = TYP_INT; } #ifdef TARGET_64BIT else { assert(ind->TypeIs(TYP_DOUBLE)); intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns)); type = TYP_LONG; } #endif } if (type != TYP_UNKNOWN) { data->BashToConst(intCns, type); ind->ChangeType(type); } } LowerStoreIndir(ind); } } //------------------------------------------------------------------------ // LowerIndir: a common logic to lower IND load or NullCheck. // // Arguments: // ind - the ind node we are lowering. // void Lowering::LowerIndir(GenTreeIndir* ind) { assert(ind->OperIs(GT_IND, GT_NULLCHECK)); // Process struct typed indirs separately unless they are unused; // they only appear as the source of a block copy operation or a return node. if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue()) { // TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects // address containment in some cases so we end up creating trivial (reg + offset) // or (reg + reg) LEAs that are not necessary. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) // Verify containment safety before creating an LEA that must be contained. // const bool isContainable = IsSafeToContainMem(ind, ind->Addr()); #else const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); ContainCheckIndir(ind); if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue()) { TransformUnusedIndirection(ind, comp, m_block); } } else { // If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR)))` // is a complex one it could benefit from an `LEA` that is not contained. 
const bool isContainable = false; TryCreateAddrMode(ind->Addr(), isContainable, ind); } } //------------------------------------------------------------------------ // TransformUnusedIndirection: change the opcode and the type of the unused indirection. // // Arguments: // ind - Indirection to transform. // comp - Compiler instance. // block - Basic block of the indirection. // void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block) { // A nullcheck is essentially the same as an indirection with no use. // The difference lies in whether a target register must be allocated. // On XARCH we can generate a compare with no target register as long as the address // is not contained. // On ARM64 we can generate a load to REG_ZR in all cases. // However, on ARM we must always generate a load to a register. // In the case where we require a target register, it is better to use GT_IND, since // GT_NULLCHECK is a non-value node and would therefore require an internal register // to use as the target. That is non-optimal because it will be modeled as conflicting // with the source register(s). // So, to summarize: // - On ARM64, always use GT_NULLCHECK for a dead indirection. // - On ARM, always use GT_IND. // - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise. // In all cases we try to preserve the original type and never make it wider to avoid AVEs. // For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT // on XARCH as an optimization. // assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ)); ind->ChangeType(comp->gtTypeForNullCheck(ind)); #ifdef TARGET_ARM64 bool useNullCheck = true; #elif TARGET_ARM bool useNullCheck = false; #else // TARGET_XARCH bool useNullCheck = !ind->Addr()->isContained(); #endif // !TARGET_XARCH if (useNullCheck && !ind->OperIs(GT_NULLCHECK)) { comp->gtChangeOperToNullCheck(ind, block); ind->ClearUnusedValue(); } else if (!useNullCheck && !ind->OperIs(GT_IND)) { ind->ChangeOper(GT_IND); ind->SetUnusedValue(); } } //------------------------------------------------------------------------ // LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK. // // Arguments: // blkNode - the store blk/obj node we are lowering. // void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ)); // Lose the type information stored in the source - we no longer need it. if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK)) { blkNode->Data()->SetOper(GT_IND); LowerIndir(blkNode->Data()->AsIndir()); } if (TryTransformStoreObjAsStoreInd(blkNode)) { return; } LowerBlockStore(blkNode); } //------------------------------------------------------------------------ // TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND. // // Arguments: // blkNode - the store node. // // Return value: // true if the replacement was made, false otherwise. // // Notes: // TODO-CQ: this method should do the transformation when possible // and STOREIND should always generate better or the same code as // STORE_OBJ/BLK for the same copy. 
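//   Illustrative example (schematic): for a struct whose layout has register type TYP_INT,
//     STORE_OBJ(dstAddr, IND(srcAddr))
//   is rewritten by this method as
//     STOREIND<int>(dstAddr, IND<int>(srcAddr))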
// bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ)); if (!comp->opts.OptimizationEnabled()) { return false; } if (blkNode->OperIs(GT_STORE_DYN_BLK)) { return false; } ClassLayout* layout = blkNode->GetLayout(); if (layout == nullptr) { return false; } var_types regType = layout->GetRegisterType(); if (regType == TYP_UNDEF) { return false; } GenTree* src = blkNode->Data(); if (varTypeIsSIMD(regType) && src->IsConstInitVal()) { // TODO-CQ: support STORE_IND SIMD16(SIMD16, CNT_INT 0). return false; } if (varTypeIsGC(regType)) { // TODO-CQ: STOREIND does not try to contain src if we need a barrier, // STORE_OBJ generates better code currently. return false; } if (src->OperIsInitVal() && !src->IsConstInitVal()) { return false; } if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal()) { // source operand INDIR will use a widening instruction // and generate worse code, like `movzx` instead of `mov` // on x64. return false; } JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID); blkNode->ChangeOper(GT_STOREIND); blkNode->ChangeType(regType); if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0) { blkNode->gtFlags |= GTF_IND_TGTANYWHERE; } if (varTypeIsStruct(src)) { src->ChangeType(regType); LowerNode(blkNode->Data()); } else if (src->OperIsInitVal()) { GenTreeUnOp* initVal = src->AsUnOp(); src = src->gtGetOp1(); assert(src->IsCnsIntOrI()); src->AsIntCon()->FixupInitBlkValue(regType); blkNode->SetData(src); BlockRange().Remove(initVal); } else { assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall()); } LowerStoreIndirCommon(blkNode->AsStoreInd()); return true; }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                                 Lower                                     XX
XX                                                                           XX
XX  Preconditions:                                                           XX
XX                                                                           XX
XX  Postconditions (for the nodes currently handled):                        XX
XX    - All operands requiring a register are explicit in the graph          XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif

#include "lower.h"

#if !defined(TARGET_64BIT)
#include "decomposelongs.h"
#endif // !defined(TARGET_64BIT)

//------------------------------------------------------------------------
// MakeSrcContained: Make "childNode" a contained node
//
// Arguments:
//    parentNode - is a non-leaf node that can contain its 'childNode'
//    childNode  - is an op that will now be contained by its parent.
//
// Notes:
//    If 'childNode' has any existing sources, they will now be sources for the parent.
//
void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const
{
    assert(!parentNode->OperIsLeaf());
    assert(childNode->canBeContained());

    childNode->SetContained();
    assert(childNode->isContained());

#ifdef DEBUG
    if (IsContainableMemoryOp(childNode))
    {
        // Verify caller of this method checked safety.
        //
        const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode);

        if (!isSafeToContainMem)
        {
            JITDUMP("** Unsafe mem containment of [%06u] in [%06u]\n", comp->dspTreeID(childNode),
                    comp->dspTreeID(parentNode));
            assert(isSafeToContainMem);
        }
    }
#endif
}

//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
//    and, if so, makes it contained.
//
// Arguments:
//    parentNode - is any non-leaf node
//    childNode  - is a child op of 'parentNode'
//
// Return value:
//     true if we are able to make childNode a contained immediate
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
{
    assert(!parentNode->OperIsLeaf());

    // If childNode is a containable immediate
    if (IsContainableImmed(parentNode, childNode))
    {
        // then make it contained within the parentNode
        MakeSrcContained(parentNode, childNode);
        return true;
    }
    return false;
}

//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
// and returns 'true' iff memory operand childNode can be contained in parentNode.
//
// Arguments:
//    parentNode - any non-leaf node
//    childNode  - some node that is an input to `parentNode`
//
// Return value:
//    true if it is safe to make childNode a contained memory operand.
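//    Example (hypothetical LIR order): in "t1 = IND(x); STOREIND(x, y); ADD(t2, t1)"
//    the IND cannot be contained in the ADD, because the intervening STOREIND may
//    interfere with the memory the contained IND would re-read at the ADD.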
// bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const { // Quick early-out for unary cases // if (childNode->gtNext == parentNode) { return true; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext) { const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode // and returns 'true' iff memory operand childNode can be contained in ancestorNode // // Arguments: // grandParentNode - any non-leaf node // parentNode - parent of `childNode` and an input to `grandParentNode` // childNode - some node that is an input to `parentNode` // // Return value: // true if it is safe to make childNode a contained memory operand. // bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const { m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext) { if (node == parentNode) { continue; } const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // LowerNode: this is the main entry point for Lowering. // // Arguments: // node - the node we are lowering. // // Returns: // next node in the transformed node sequence that needs to be lowered. // GenTree* Lowering::LowerNode(GenTree* node) { assert(node != nullptr); switch (node->gtOper) { case GT_NULLCHECK: case GT_IND: LowerIndir(node->AsIndir()); break; case GT_STOREIND: LowerStoreIndirCommon(node->AsStoreInd()); break; case GT_ADD: { GenTree* next = LowerAdd(node->AsOp()); if (next != nullptr) { return next; } } break; #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_SUB: case GT_AND: case GT_OR: case GT_XOR: return LowerBinaryArithmetic(node->AsOp()); case GT_MUL: case GT_MULHI: #if defined(TARGET_X86) || defined(TARGET_ARM64) case GT_MUL_LONG: #endif return LowerMul(node->AsOp()); case GT_UDIV: case GT_UMOD: if (!LowerUnsignedDivOrMod(node->AsOp())) { ContainCheckDivOrMod(node->AsOp()); } break; case GT_DIV: case GT_MOD: return LowerSignedDivOrMod(node); case GT_SWITCH: return LowerSwitch(node); case GT_CALL: LowerCall(node); break; case GT_LT: case GT_LE: case GT_GT: case GT_GE: case GT_EQ: case GT_NE: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: return LowerCompare(node); case GT_JTRUE: return LowerJTrue(node->AsOp()); case GT_JMP: LowerJmpMethod(node); break; case GT_RETURN: LowerRet(node->AsUnOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_CAST: LowerCast(node); break; #if defined(TARGET_XARCH) || defined(TARGET_ARM64) case GT_BOUNDS_CHECK: ContainCheckBoundsChk(node->AsBoundsChk()); break; #endif // TARGET_XARCH case GT_ARR_ELEM: return LowerArrElem(node); case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_ROL: case GT_ROR: LowerRotate(node); break; #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: ContainCheckShiftRotate(node->AsOp()); break; #endif // !TARGET_64BIT case GT_LSH: case GT_RSH: case GT_RSZ: #if defined(TARGET_XARCH) || defined(TARGET_ARM64) LowerShift(node->AsOp()); #else 
ContainCheckShiftRotate(node->AsOp()); #endif break; case GT_STORE_BLK: case GT_STORE_OBJ: if (node->AsBlk()->Data()->IsCall()) { LowerStoreSingleRegCallStruct(node->AsBlk()); break; } FALLTHROUGH; case GT_STORE_DYN_BLK: LowerBlockStoreCommon(node->AsBlk()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: LowerSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: LowerHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS case GT_LCL_FLD: { // We should only encounter this for lclVars that are lvDoNotEnregister. verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum()); break; } case GT_LCL_VAR: { GenTreeLclVar* lclNode = node->AsLclVar(); WidenSIMD12IfNecessary(lclNode); LclVarDsc* varDsc = comp->lvaGetDesc(lclNode); // The consumer of this node must check compatibility of the fields. // This merely checks whether it is possible for this to be a multireg node. if (lclNode->IsMultiRegLclVar()) { if (!varDsc->lvPromoted || (comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) || (varDsc->lvFieldCnt > MAX_MULTIREG_COUNT)) { lclNode->ClearMultiReg(); if (lclNode->TypeIs(TYP_STRUCT)) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } } break; } case GT_STORE_LCL_VAR: WidenSIMD12IfNecessary(node->AsLclVarCommon()); FALLTHROUGH; case GT_STORE_LCL_FLD: LowerStoreLocCommon(node->AsLclVarCommon()); break; #if defined(TARGET_ARM64) case GT_CMPXCHG: CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand); break; case GT_XORR: case GT_XAND: case GT_XADD: CheckImmedAndMakeContained(node, node->AsOp()->gtOp2); break; #elif defined(TARGET_XARCH) case GT_XORR: case GT_XAND: case GT_XADD: if (node->IsUnusedValue()) { node->ClearUnusedValue(); // Make sure the types are identical, since the node type is changed to VOID // CodeGen relies on op2's type to determine the instruction size. // Note that the node type cannot be a small int but the data operand can. assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet()); node->SetOper(GT_LOCKADD); node->gtType = TYP_VOID; CheckImmedAndMakeContained(node, node->gtGetOp2()); } break; #endif #ifndef TARGET_ARMARCH // TODO-ARMARCH-CQ: We should contain this as long as the offset fits. case GT_OBJ: if (node->AsObj()->Addr()->OperIsLocalAddr()) { node->AsObj()->Addr()->SetContained(); } break; #endif // !TARGET_ARMARCH case GT_KEEPALIVE: node->gtGetOp1()->SetRegOptional(); break; case GT_LCL_FLD_ADDR: case GT_LCL_VAR_ADDR: { const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = comp->lvaGetDesc(lclAddr); if (!varDsc->lvDoNotEnregister) { // TODO-Cleanup: this is definitely not the best place for this detection, // but for now it is the easiest. Move it to morph. comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode)); } } break; default: break; } return node->gtNext; } /** -- Switch Lowering -- * The main idea of switch lowering is to keep transparency of the register requirements of this node * downstream in LSRA. 
 * Given that the switch instruction is inherently a control statement which in the JIT
 * is represented as a simple tree node, at the time we actually generate code for it we end up
 * generating instructions that actually modify the flow of execution, which imposes complicated
 * register requirements and lifetimes.
 *
 * So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
 * means and more importantly, which and when do we need a register for each instruction we want to issue
 * to correctly allocate them downstream.
 *
 * For this purpose, this procedure performs switch lowering in two different ways:
 *
 * a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
 *    of the switch, we will store this destination in an array of addresses and the code generator will issue
 *    a data section where this array will live and will emit code that based on the switch index, will indirect and
 *    jump to the destination specified in the jump table.
 *
 *    For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
 *    node for jump table based switches.
 *    The overall structure of a GT_SWITCH_TABLE is:
 *
 *    GT_SWITCH_TABLE
 *           |_________ localVar   (a temporary local that holds the switch index)
 *           |_________ jumpTable  (this is a special node that holds the address of the jump table array)
 *
 *    Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
 *
 *    Input:     GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
 *                    |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
 *
 *    This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
 *    the default case of the switch in case the conditional is evaluated to true).
 *
 *     ----- original block, transformed
 *     GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
 *        |_____ expr (the index expression)
 *
 *     GT_JTRUE
 *        |_____ GT_COND
 *                 |_____ GT_GE
 *                           |___ Int_Constant  (This constant is the index of the default case
 *                                               that happens to be the highest index in the jump table).
 *                           |___ tempLocal     (The local variable where we stored the index expression).
 *
 *     ----- new basic block
 *     GT_SWITCH_TABLE
 *        |_____ tempLocal
 *        |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
 *                          and LinearCodeGen will be responsible to generate downstream).
 *
 *     This way there are no implicit temporaries.
 *
 * b) For small-sized switches, we will actually morph them into a series of conditionals of the form
 *     if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
 *     (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
 *     else if (case == firstCase){ goto jumpTable[1]; }
 *     else if (case == secondCase) { goto jumpTable[2]; } and so on.
 *
 *     This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer
 *     require internal temporaries to maintain the index we're evaluating plus we're using existing code from
 *     LinearCodeGen to implement this instead of implementing all the control flow constructs using InstrDscs and
 *     InstrGroups downstream.
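 *
 *     As a concrete sketch of (b) (hypothetical case values): a switch with
 *     non-default cases 0 and 1 plus a default lowers to roughly:
 *
 *         if (temp > 1) goto defaultBlock;   // unsigned compare, also catches "negative" indices
 *         if (temp == 0) goto case0Block;
 *         goto case1Block;                   // the last case needs no compare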
*/ GenTree* Lowering::LowerSwitch(GenTree* node) { unsigned jumpCnt; unsigned targetCnt; BasicBlock** jumpTab; assert(node->gtOper == GT_SWITCH); // The first step is to build the default case conditional construct that is // shared between both kinds of expansion of the switch node. // To avoid confusion, we'll alias m_block to originalSwitchBB // that represents the node we're morphing. BasicBlock* originalSwitchBB = m_block; LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB); // jumpCnt is the number of elements in the jump table array. // jumpTab is the actual pointer to the jump table array. // targetCnt is the number of unique targets in the jump table array. jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount; jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab; targetCnt = originalSwitchBB->NumSucc(comp); // GT_SWITCH must be a top-level node with no use. #ifdef DEBUG { LIR::Use use; assert(!switchBBRange.TryGetUse(node, &use)); } #endif JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt); // Handle a degenerate case: if the switch has only a default case, just convert it // to an unconditional branch. This should only happen in minopts or with debuggable // code. if (targetCnt == 1) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { originalSwitchBB->bbJumpKind = BBJ_NONE; originalSwitchBB->bbJumpDest = nullptr; } else { originalSwitchBB->bbJumpKind = BBJ_ALWAYS; originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. for (unsigned i = 1; i < jumpCnt; ++i) { (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB); } // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign // the result of the child subtree to a temp. GenTree* rhs = node->AsOp()->gtOp1; unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable")); comp->lvaTable[lclNum].lvType = rhs->TypeGet(); GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs); switchBBRange.InsertAfter(node, store); switchBBRange.Remove(node); return store; } noway_assert(jumpCnt >= 2); // Spill the argument to the switch node into a local so that it can be used later. LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node); ReplaceWithLclVar(use); // GT_SWITCH(indexExpression) is now two statements: // 1. a statement containing 'asg' (for temp = indexExpression) // 2. and a statement with GT_SWITCH(temp) assert(node->gtOper == GT_SWITCH); GenTree* temp = node->AsOp()->gtOp1; assert(temp->gtOper == GT_LCL_VAR); unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum(); var_types tempLclType = temp->TypeGet(); BasicBlock* defaultBB = jumpTab[jumpCnt - 1]; BasicBlock* followingBB = originalSwitchBB->bbNext; /* Is the number of cases right for a test and jump switch? */ const bool fFirstCaseFollows = (followingBB == jumpTab[0]); const bool fDefaultFollows = (followingBB == defaultBB); unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc // This means really just a single cmp/jcc (aka a simple if/else) if (fFirstCaseFollows || fDefaultFollows) { minSwitchTabJumpCnt++; } #if defined(TARGET_ARM) // On ARM for small switch tables we will // generate a sequence of compare and branch instructions // because the code to load the base of the switch // table is huge and hideous due to the relocation... 
    // :(
    minSwitchTabJumpCnt += 2;
#endif // TARGET_ARM

    // Once we have the temporary variable, we construct the conditional branch for
    // the default case.  As stated above, this conditional is being shared between
    // both GT_SWITCH lowering code paths.
    // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
    GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
                                                     comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));

    // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
    // is now less than zero (that would also hit the default case).
    gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;

    GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
    gtDefaultCaseJump->gtFlags = node->gtFlags;

    LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
    switchBBRange.InsertAtEnd(std::move(condRange));

    BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());

    // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
    // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
    // representing the fall-through flow from originalSwitchBB.
    assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
    assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
    assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
    assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
    assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.

    // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).

    // Turn originalSwitchBB into a BBJ_COND.
    originalSwitchBB->bbJumpKind = BBJ_COND;
    originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];

    // Fix the pred for the default case: the default block target still has originalSwitchBB
    // as a predecessor, but the fgSplitBlockAfterNode() moved all predecessors to point
    // to afterDefaultCondBlock.
    flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
    comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);

    bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;

    if (TargetOS::IsUnix && TargetArchitecture::IsArm32)
    {
        // Force using inlined jumps instead of switch table generation.
        // The switch jump table is generated with incorrect values in the CoreRT case,
        // so any large switch will crash after loading to PC any such value.
        // I think this is due to the fact that we use absolute addressing
        // instead of relative. But CoreRT generally uses relative addressing
        // when we generate an executable.
        // See also https://github.com/dotnet/runtime/issues/8683
        // Also https://github.com/dotnet/coreclr/pull/13197
        useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
    }

    // If we originally had 2 unique successors, check to see whether there is a unique
    // non-default case, in which case we can eliminate the switch altogether.
    // Note that the single unique successor case is handled above.
    BasicBlock* uniqueSucc = nullptr;
    if (targetCnt == 2)
    {
        uniqueSucc = jumpTab[0];
        noway_assert(jumpCnt >= 2);
        for (unsigned i = 1; i < jumpCnt - 1; i++)
        {
            if (jumpTab[i] != uniqueSucc)
            {
                uniqueSucc = nullptr;
                break;
            }
        }
    }
    if (uniqueSucc != nullptr)
    {
        // If the unique successor immediately follows this block, we have nothing to do -
        // it will simply fall-through after we remove the switch, below.
        // Otherwise, make this a BBJ_ALWAYS.
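        // For illustration (hypothetical flow): if every non-default entry of the
        // jump table targets the same block S (targetCnt == 2: S plus the default),
        // the switch degenerates into a fall-through to S or a single
        // unconditional jump to S.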
        // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
        //   jumpTab[jumpCnt - 1] was the default target, which we handled above,
        //   jumpTab[0] is the first target, and we'll leave that predecessor link.
        // Remove any additional predecessor links to uniqueSucc.
        for (unsigned i = 1; i < jumpCnt - 1; ++i)
        {
            assert(jumpTab[i] == uniqueSucc);
            (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
        }
        if (afterDefaultCondBlock->bbNext == uniqueSucc)
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
            afterDefaultCondBlock->bbJumpDest = nullptr;
        }
        else
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
            afterDefaultCondBlock->bbJumpDest = uniqueSucc;
        }
    }
    // If the number of possible destinations is small enough, we proceed to expand the switch
    // into a series of conditional branches, otherwise we follow the jump table based switch
    // transformation.
    else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
    {
        // Lower the switch into a series of compare and branch IR trees.
        //
        // In this case we will morph the node in the following way:
        // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
        // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
        //    a statement that is responsible for performing a comparison of the table index and conditional
        //    branch if equal.

        JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);

        // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
        // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
        // we'll delete it.
        bool        fUsedAfterDefaultCondBlock = false;
        BasicBlock* currentBlock               = afterDefaultCondBlock;
        LIR::Range* currentBBRange             = &LIR::AsRange(currentBlock);

        // Walk the non-default entries 0 through jumpCnt - 2. If a case target follows, ignore it and let it fall
        // through. If no case target follows, the last one doesn't need to be a compare/branch: it can be an
        // unconditional branch.
        bool fAnyTargetFollows = false;
        for (unsigned i = 0; i < jumpCnt - 1; ++i)
        {
            assert(currentBlock != nullptr);

            // Remove the switch from the predecessor list of this case target's block.
            // We'll add the proper new predecessor edge later.
            flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);

            if (jumpTab[i] == followingBB)
            {
                // This case label follows the switch; let it fall through.
                fAnyTargetFollows = true;
                continue;
            }

            // We need a block to put in the new compare and/or branch.
            // If we haven't used the afterDefaultCondBlock yet, then use that.
            if (fUsedAfterDefaultCondBlock)
            {
                BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
                comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
                currentBlock   = newBlock;
                currentBBRange = &LIR::AsRange(currentBlock);
            }
            else
            {
                assert(currentBlock == afterDefaultCondBlock);
                fUsedAfterDefaultCondBlock = true;
            }

            // We're going to have a branch, either a conditional or unconditional,
            // to the target. Set the target.
            currentBlock->bbJumpDest = jumpTab[i];

            // Wire up the predecessor list for the "branch" case.
comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { // We're processing the last one, and there is no fall through from any case // to the following block, so we can use an unconditional branch to the final // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). currentBlock->bbJumpKind = BBJ_ALWAYS; } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. currentBlock->bbJumpKind = BBJ_COND; // Now, build the conditional statement for the current case that is // being evaluated: // GT_JTRUE // |__ GT_COND // |____GT_EQ // |____ (switchIndex) (The temp variable) // |____ (ICon) (The actual case constant) GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(i, tempLclType)); GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); currentBBRange->InsertAtEnd(std::move(caseRange)); } } if (fAnyTargetFollows) { // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. comp->fgAddRefPred(currentBlock->bbNext, currentBlock); } if (!fUsedAfterDefaultCondBlock) { // All the cases were fall-through! We don't need this block. // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->bbJumpKind == BBJ_SWITCH); currentBlock->bbJumpKind = BBJ_NONE; currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } } else { // At this point the default case has already been handled and we need to generate a jump // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both // switch variants need the switch value so create the necessary LclVar node here. GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType); LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock); switchBlockRange.InsertAtEnd(switchValue); // Try generating a bit test based switch first, // if that's not possible a jump table based switch will be generated. if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue)) { JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum); #ifdef TARGET_64BIT if (tempLclType != TYP_I_IMPL) { // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL. // Note that the switch value is unsigned so the cast should be unsigned as well. switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL); switchBlockRange.InsertAtEnd(switchValue); } #endif GenTree* switchTable = comp->gtNewJmpTableNode(); GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable); switchBlockRange.InsertAfter(switchValue, switchTable, switchJump); // this block no longer branches to the default block afterDefaultCondBlock->bbJumpSwt->removeDefault(); } comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock); } GenTree* next = node->gtNext; // Get rid of the GT_SWITCH(temp). 
    switchBBRange.Remove(node->AsOp()->gtOp1);
    switchBBRange.Remove(node);
    return next;
}

//------------------------------------------------------------------------
// TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
//
// Arguments:
//    jumpTable - The jump table
//    jumpCount - The number of blocks in the jump table
//    targetCount - The number of distinct blocks in the jump table
//    bbSwitch - The switch block
//    switchValue - A LclVar node that provides the switch value
//
// Return value:
//    true if the switch has been lowered to a bit test
//
// Notes:
//    If the jump table contains fewer than 32 (64 on 64 bit targets) entries and there
//    are at most 2 distinct jump targets then the jump table can be converted to a word
//    of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
//    other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
//    to the appropriate target:
//        mov eax, 245 ; jump table converted to a "bit table"
//        bt  eax, ebx ; ebx is supposed to contain the switch value
//        jc target1
//      target0:
//        ...
//      target1:
//    Such code is both shorter and faster (in part due to the removal of a memory load)
//    than the traditional jump table based code. And of course, it also avoids the need
//    to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
//
bool Lowering::TryLowerSwitchToBitTest(
    BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
{
#ifndef TARGET_XARCH
    // Other architectures may use this if they substitute GT_BT with equivalent code.
    return false;
#else
    assert(jumpCount >= 2);
    assert(targetCount >= 2);
    assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
    assert(switchValue->OperIs(GT_LCL_VAR));

    //
    // Quick check to see if it's worth going through the jump table. The bit test switch supports
    // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
    // We'll ensure that there are only 2 targets when building the bit table.
    //
    if (targetCount > 3)
    {
        return false;
    }

    //
    // The number of bits in the bit table is the same as the number of jump table entries. But the
    // jump table also includes the default target (at the end) so we need to ignore it. The default
    // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
    //
    const unsigned bitCount = jumpCount - 1;

    if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
    {
        return false;
    }

    //
    // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
    // bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
    // table and/or swap the blocks if it's beneficial.
    //

    BasicBlock* bbCase0  = nullptr;
    BasicBlock* bbCase1  = jumpTable[0];
    size_t      bitTable = 1;

    for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
    {
        if (jumpTable[bitIndex] == bbCase1)
        {
            bitTable |= (size_t(1) << bitIndex);
        }
        else if (bbCase0 == nullptr)
        {
            bbCase0 = jumpTable[bitIndex];
        }
        else if (jumpTable[bitIndex] != bbCase0)
        {
            // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
            // than 3 because of the check at the start of the function.
            assert(targetCount == 3);
            return false;
        }
    }

    //
    // One of the case blocks has to follow the switch block. This requirement could be avoided
    // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
    // impacts register allocation.
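    // As a worked sketch (hypothetical jump table): non-default targets
    // {T1, T0, T1, T1} give bitCount = 4; with jumpTable[0] = T1 as bbCase1 the
    // loop above produces bitTable = 0b1101 (bits 0, 2 and 3 set), and BT tests
    // that word against the switch value to pick T1 (bit set) or T0 (bit clear).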
    //
    if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
    {
        return false;
    }

#ifdef TARGET_64BIT
    //
    // See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
    // then inverting the bit table will make them 0 so that the table now fits in 32 bits.
    // Note that this does not change the number of bits in the bit table, it just takes
    // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
    // extends the immediate value to 64 bit.
    //
    if (~bitTable <= UINT32_MAX)
    {
        bitTable = ~bitTable;
        std::swap(bbCase0, bbCase1);
    }
#endif

    //
    // Rewire the blocks as needed and figure out the condition to use for JCC.
    //

    GenCondition bbSwitchCondition;
    bbSwitch->bbJumpKind = BBJ_COND;

    comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
    comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);

    if (bbSwitch->bbNext == bbCase0)
    {
        // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
        bbSwitchCondition    = GenCondition::C;
        bbSwitch->bbJumpDest = bbCase1;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }
    else
    {
        assert(bbSwitch->bbNext == bbCase1);

        // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
        bbSwitchCondition    = GenCondition::NC;
        bbSwitch->bbJumpDest = bbCase0;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }

    //
    // Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
    //

    var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
    GenTree*  bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
    GenTree*  bitTest      = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
    bitTest->gtFlags |= GTF_SET_FLAGS;
    GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
    jcc->gtFlags |= GTF_USE_FLAGS;

    LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);

    return true;
#endif // TARGET_XARCH
}

// NOTE: this method deliberately does not update the call arg table. It must only
// be used by NewPutArg and LowerArg; these functions are responsible for updating
// the call arg table as necessary.
void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
{
    assert(argSlot != nullptr);
    assert(*argSlot != nullptr);
    assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));

    GenTree* arg = *argSlot;

    // Replace the argument with the putarg/copy
    *argSlot                       = putArgOrBitcast;
    putArgOrBitcast->AsOp()->gtOp1 = arg;

    // Insert the putarg/copy into the block
    BlockRange().InsertAfter(arg, putArgOrBitcast);
}

//------------------------------------------------------------------------
// NewPutArg: rewrites the tree to put an arg in a register or on the stack.
//
// Arguments:
//    call - the call whose arg is being rewritten.
//    arg  - the arg being rewritten.
//    info - the fgArgTabEntry information for the argument.
//    type - the type of the argument.
//
// Return Value:
//    The new tree that was created to put the arg in the right place
//    or the incoming arg if the arg tree was not rewritten.
//
// Assumptions:
//    call, arg, and info must be non-null.
//
// Notes:
//    For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
//    this method allocates a single GT_PUTARG_REG for one-eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
//    for two-eightbyte structs.
//
//    For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
//    (i.e.
UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers // layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value. // (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.) // GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type) { assert(call != nullptr); assert(arg != nullptr); assert(info != nullptr); GenTree* putArg = nullptr; bool isOnStack = (info->GetRegNum() == REG_STK); #ifdef TARGET_ARMARCH // Mark contained when we pass struct // GT_FIELD_LIST is always marked contained when it is generated if (type == TYP_STRUCT) { arg->SetContained(); if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR)) { MakeSrcContained(arg, arg->AsObj()->Addr()); } } #endif #if FEATURE_ARG_SPLIT // Struct can be split into register(s) and stack on ARM if (compFeatureArgSplit() && info->IsSplit()) { assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST); // TODO: Need to check correctness for FastTailCall if (call->IsFastTailCall()) { #ifdef TARGET_ARM NYI_ARM("lower: struct argument by fast tail call"); #endif // TARGET_ARM } const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE; DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum); const bool putInIncomingArgArea = call->IsFastTailCall(); putArg = new (comp, GT_PUTARG_SPLIT) GenTreePutArgSplit(arg, info->GetByteOffset(), #if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(), #elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK) slotNumber, #elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), #endif info->numRegs, call, putInIncomingArgArea); // If struct argument is morphed to GT_FIELD_LIST node(s), // we can know GC info by type of each GT_FIELD_LIST node. // So we skip setting GC Pointer info. // GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit(); for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++) { argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex); } if (arg->OperGet() == GT_OBJ) { ClassLayout* layout = arg->AsObj()->GetLayout(); // Set type of registers for (unsigned index = 0; index < info->numRegs; index++) { argSplit->m_regType[index] = layout->GetGCPtrType(index); } } else { unsigned regIndex = 0; for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses()) { if (regIndex >= info->numRegs) { break; } var_types regType = use.GetNode()->TypeGet(); // Account for the possibility that float fields may be passed in integer registers. if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex))) { regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG; } argSplit->m_regType[regIndex] = regType; regIndex++; } // Clear the register assignment on the fieldList node, as these are contained. 
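// For illustration (hypothetical split arg on arm32): a field list {float, float}
// whose fields land in the integer registers r2/r3 records m_regType[] as
// {TYP_INT, TYP_INT}, since the assigned registers are not float registers.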
            arg->SetRegNum(REG_NA);
        }
    }
    else
#endif // FEATURE_ARG_SPLIT
    {
        if (!isOnStack)
        {
#if FEATURE_MULTIREG_ARGS
            if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
            {
                unsigned int regIndex = 0;
                for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
                {
                    regNumber argReg = info->GetRegNum(regIndex);
                    GenTree*  curOp  = use.GetNode();
                    var_types curTyp = curOp->TypeGet();

                    // Create a new GT_PUTARG_REG node with op1
                    GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);

                    // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper);
                    regIndex++;
                }

                // Just return arg. The GT_FIELD_LIST is not replaced.
                // Nothing more to do.
                return arg;
            }
            else
#endif // FEATURE_MULTIREG_ARGS
            {
                putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum());
            }
        }
        else
        {
            // Mark this one as tail call arg if it is a fast tail call.
            // This provides the info to put this argument in in-coming arg area slot
            // instead of in out-going arg area slot.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
            // a result. So the type of its operand must be the correct type to push on the stack.
            // For a FIELD_LIST, this will be the type of the field (not the type of the arg),
            // but otherwise it is generally the type of the operand.
            info->checkIsStruct();
#endif

            if ((arg->OperGet() != GT_FIELD_LIST))
            {
#if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                if (type == TYP_SIMD12)
                {
#if !defined(TARGET_64BIT)
                    assert(info->GetByteSize() == 12);
#else  // TARGET_64BIT
                    if (compMacOsArm64Abi())
                    {
                        assert(info->GetByteSize() == 12);
                    }
                    else
                    {
                        assert(info->GetByteSize() == 16);
                    }
#endif // TARGET_64BIT
                }
                else
#endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                {
                    assert(genActualType(arg->TypeGet()) == type);
                }
            }
            const unsigned slotNumber           = info->GetByteOffset() / TARGET_POINTER_SIZE;
            const bool     putInIncomingArgArea = call->IsFastTailCall();

            putArg = new (comp, GT_PUTARG_STK)
                GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                 info->GetStackByteSize(),
#endif
                                 call, putInIncomingArgArea);

#ifdef FEATURE_PUT_STRUCT_ARG_STK
            // If the ArgTabEntry indicates that this arg is a struct
            // get and store the number of slots that are references.
            // This is later used in the codegen for PUT_ARG_STK implementation
            // for struct to decide whether and how many single eight-byte copies
            // to be done (only for reference slots), so gcinfo is emitted.
            // For non-reference slots faster/smaller size instructions are used -
            // pair copying using XMM registers or rep mov instructions.
            if (info->isStruct)
            {
                // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
                if (arg->OperIsLocal())
                {
                    // This must have a type with a known size (SIMD or has been morphed to a primitive type).
                    assert(arg->TypeGet() != TYP_STRUCT);
                }
                else if (arg->OperIs(GT_OBJ))
                {
                    assert(!varTypeIsSIMD(arg));

#ifdef TARGET_X86
                    // On x86 the VM lies about the type of a struct containing a pointer sized
                    // integer field by returning the type of its field as the type of struct.
                    // Such a struct can be passed in a register depending on its position in the
                    // parameter list. The VM does this unwrapping only one level and therefore
                    // a type like Struct Foo { Struct Bar { int f}} always needs to be
                    // passed on stack. Also, the VM doesn't lie about the type of such a struct
                    // when it is a field of another struct. That is the VM doesn't lie about
                    // the type of Foo.Bar
                    //
                    // We now support the promotion of fields that are of type struct.
                    // However we only support a limited case where the struct field has a
                    // single field and that single field must be a scalar type. Say Foo.Bar
                    // field is getting passed as a parameter to a call. Since it is a TYP_STRUCT,
                    // as per x86 ABI it should always be passed on stack. Therefore GenTree
                    // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
                    // local v1 could be a promoted field standing for Foo.Bar. Note that
                    // the type of v1 will be the type of field of Foo.Bar.f when Foo is
                    // promoted. That is v1 will be a scalar type. In this case we need to
                    // pass v1 on stack instead of in a register.
                    //
                    // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
                    // a scalar type and the width of GT_OBJ matches the type size of v1.
                    // Note that this cannot be done till call node arguments are morphed
                    // because we should not lose the fact that the type of argument is
                    // a struct so that the arg gets correctly marked to be passed on stack.
                    GenTree* objOp1 = arg->gtGetOp1();
                    if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
                    {
                        unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
                        if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
                        {
                            comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
                        }
                    }
#endif // TARGET_X86
                }
                else if (!arg->OperIs(GT_FIELD_LIST))
                {
#ifdef TARGET_ARM
                    assert((info->GetStackSlotsNumber() == 1) ||
                           ((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2)));
#else
                    assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1));
#endif
                }
            }
#endif // FEATURE_PUT_STRUCT_ARG_STK
        }
    }

    JITDUMP("new node is : ");
    DISPNODE(putArg);
    JITDUMP("\n");

    if (arg->gtFlags & GTF_LATE_ARG)
    {
        putArg->gtFlags |= GTF_LATE_ARG;
    }
    return putArg;
}

//------------------------------------------------------------------------
// LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
// the argument evaluation and the call. This is the point at which the source is
// consumed and the value transitions from control of the register allocator to the calling
// convention.
//
// Arguments:
//    call  - The call node
//    ppArg - Pointer to the call argument pointer. We might replace the call argument by
//            changing *ppArg.
//
// Return Value:
//    None.
//
void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
{
    GenTree* arg = *ppArg;

    JITDUMP("lowering arg : ");
    DISPNODE(arg);

    // No assignments should remain by Lowering.
    assert(!arg->OperIs(GT_ASG));
    assert(!arg->OperIsPutArgStk());

    // Assignments/stores at this level are not really placing an argument.
    // They are setting up temporary locals that will later be placed into
    // outgoing regs or stack.
    // Note that atomic ops may be stores and still produce a value.
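    // For example (hypothetical shape): a STORE_LCL_VAR(tmpN, ...) appearing in
    // the arg list is such a setup node - the value actually passed is a later
    // use of tmpN - so it is skipped below rather than wrapped in a PUTARG.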
if (!arg->IsValue()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg); assert(info->GetNode() == arg); var_types type = arg->TypeGet(); if (varTypeIsSmall(type)) { // Normalize 'type', it represents the item that we will be storing in the Outgoing Args type = TYP_INT; } #if defined(FEATURE_SIMD) #if defined(TARGET_X86) // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their // allocated size (see lvSize()). However, when passing the variables as arguments, and // storing the variables to the outgoing argument area on the stack, we must use their // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written. if (type == TYP_SIMD16) { if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR)) { const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon()); type = varDsc->lvType; } else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC)) { GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg); // For HWIntrinsic, there are some intrinsics like ExtractVector128 which have // a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in // the assert below. assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) || (jitIntrinsic->GetSimdSize() == 32)); if (jitIntrinsic->GetSimdSize() == 12) { type = TYP_SIMD12; } } } #elif defined(TARGET_AMD64) // TYP_SIMD8 parameters that are passed as longs if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum())) { GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg); BlockRange().InsertAfter(arg, bitcast); *ppArg = arg = bitcast; assert(info->GetNode() == arg); type = TYP_LONG; } #endif // defined(TARGET_X86) #endif // defined(FEATURE_SIMD) // If we hit this we are probably double-lowering. assert(!arg->OperIsPutArg()); #if !defined(TARGET_64BIT) if (varTypeIsLong(type)) { noway_assert(arg->OperIs(GT_LONG)); GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT); GenTree* newArg = NewPutArg(call, fieldList, info, type); if (info->GetRegNum() != REG_STK) { assert(info->numRegs == 2); // In the register argument case, NewPutArg replaces the original field list args with new // GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the // only thing left to do is to insert the field list itself in linear order. assert(newArg == fieldList); BlockRange().InsertBefore(arg, newArg); } else { // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK. // Although the hi argument needs to be pushed first, that will be handled by the general case, // in which the fields will be reversed. assert(info->numSlots == 2); newArg->SetRegNum(REG_STK); BlockRange().InsertBefore(arg, fieldList, newArg); } *ppArg = newArg; assert(info->GetNode() == newArg); BlockRange().Remove(arg); } else #endif // !defined(TARGET_64BIT) { #ifdef TARGET_ARMARCH if (call->IsVarargs() || comp->opts.compUseSoftFP) { // For vararg call or on armel, reg args should be all integer. // Insert copies as needed to move float value to integer register. 
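            // For example (hypothetical arm32 softFP call): a TYP_FLOAT argument
            // headed for r1 becomes BITCAST<int>(arg), and a TYP_DOUBLE headed for
            // r2 becomes BITCAST<long>(arg) occupying the r2/r3 pair.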
            GenTree* newNode = LowerFloatArg(ppArg, info);
            if (newNode != nullptr)
            {
                type = newNode->TypeGet();
            }
        }
#endif // TARGET_ARMARCH

        GenTree* putArg = NewPutArg(call, arg, info, type);

        // In the case of register passable struct (in one or two registers)
        // the NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs.)
        // If an extra node is returned, splice it in the right place in the tree.
        if (arg != putArg)
        {
            ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
        }
    }
}

#ifdef TARGET_ARMARCH
//------------------------------------------------------------------------
// LowerFloatArg: Lower float call arguments on the arm platform.
//
// Arguments:
//    arg  - The arg node
//    info - call argument info
//
// Return Value:
//    Return nullptr, if no transformation was done;
//    return arg if the transformation was done in place;
//    return a new tree if the root was changed.
//
// Notes:
//    This must handle scalar float arguments as well as GT_FIELD_LISTs
//    with floating point fields.
//
GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
{
    GenTree* arg = *pArg;
    if (info->GetRegNum() != REG_STK)
    {
        if (arg->OperIs(GT_FIELD_LIST))
        {
            // Transform fields that are passed in registers, in place.
            regNumber currRegNumber = info->GetRegNum();
            unsigned  regIndex      = 0;
            for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
            {
                if (regIndex >= info->numRegs)
                {
                    break;
                }
                GenTree* node = use.GetNode();
                if (varTypeIsFloating(node))
                {
                    GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
                    assert(intNode != nullptr);

                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode);
                }

                if (node->TypeGet() == TYP_DOUBLE)
                {
                    currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
                    regIndex += 2;
                }
                else
                {
                    currRegNumber = REG_NEXT(currRegNumber);
                    regIndex += 1;
                }
            }
            // List fields were replaced in place.
            return arg;
        }
        else if (varTypeIsFloating(arg))
        {
            GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum());
            assert(intNode != nullptr);
            ReplaceArgWithPutArgOrBitcast(pArg, intNode);
            return *pArg;
        }
    }
    return nullptr;
}

//------------------------------------------------------------------------
// LowerFloatArgReg: Lower the float call argument node that is passed via register.
//
// Arguments:
//    arg    - The arg node
//    regNum - register number
//
// Return Value:
//    Returns a new bitcast node that moves the float to an int register.
//
GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
{
    var_types floatType = arg->TypeGet();
    assert(varTypeIsFloating(floatType));
    var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
    GenTree*  intArg  = comp->gtNewBitCastNode(intType, arg);
    intArg->SetRegNum(regNum);
#ifdef TARGET_ARM
    if (floatType == TYP_DOUBLE)
    {
        // A special case when we introduce TYP_LONG
        // during lowering for arm32 softFP to pass double
        // in int registers.
        assert(comp->opts.compUseSoftFP);

        regNumber nextReg                  = REG_NEXT(regNum);
        intArg->AsMultiRegOp()->gtOtherReg = nextReg;
    }
#endif
    return intArg;
}
#endif

// do lowering steps for each arg of a call
void Lowering::LowerArgsForCall(GenTreeCall* call)
{
    JITDUMP("objp:\n======\n");
    if (call->gtCallThisArg != nullptr)
    {
        LowerArg(call, &call->gtCallThisArg->NodeRef());
    }

    JITDUMP("\nargs:\n======\n");
    for (GenTreeCall::Use& use : call->Args())
    {
        LowerArg(call, &use.NodeRef());
    }

    JITDUMP("\nlate:\n======\n");
    for (GenTreeCall::Use& use : call->LateArgs())
    {
        LowerArg(call, &use.NodeRef());
    }
}

// helper that creates a node representing a relocatable physical address computation
GenTree* Lowering::AddrGen(ssize_t addr)
{
    // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
    GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
    return result;
}

// variant that takes a void*
GenTree* Lowering::AddrGen(void* addr)
{
    return AddrGen((ssize_t)addr);
}

// do lowering steps for a call
// this includes:
//   - adding the placement nodes (either stack or register variety) for arguments
//   - lowering the expression that calculates the target address
//   - adding nodes for other operations that occur after the call sequence starts and before
//     control transfer occurs (profiling and tail call helpers, pinvoke incantations)
//
void Lowering::LowerCall(GenTree* node)
{
    GenTreeCall* call = node->AsCall();

    JITDUMP("lowering call (before):\n");
    DISPTREERANGE(BlockRange(), call);
    JITDUMP("\n");

    call->ClearOtherRegs();
    LowerArgsForCall(call);

    // note that everything generated from this point might run AFTER the outgoing args are placed
    GenTree* controlExpr          = nullptr;
    bool     callWasExpandedEarly = false;

    // for x86, this is where we record ESP for checking later to make sure stack is balanced

    // Check for Delegate.Invoke(). If so, we inline it. We get the
    // target-object and target-function from the delegate-object, and do
    // an indirect call.
    if (call->IsDelegateInvoke())
    {
        controlExpr = LowerDelegateInvoke(call);
    }
    else
    {
        //  Virtual and interface calls
        switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
        {
            case GTF_CALL_VIRT_STUB:
                controlExpr = LowerVirtualStubCall(call);
                break;

            case GTF_CALL_VIRT_VTABLE:
                assert(call->IsVirtualVtable());
                if (!call->IsExpandedEarly())
                {
                    assert(call->gtControlExpr == nullptr);
                    controlExpr = LowerVirtualVtableCall(call);
                }
                else
                {
                    callWasExpandedEarly = true;
                    controlExpr          = call->gtControlExpr;
                }
                break;

            case GTF_CALL_NONVIRT:
                if (call->IsUnmanaged())
                {
                    controlExpr = LowerNonvirtPinvokeCall(call);
                }
                else if (call->gtCallType == CT_INDIRECT)
                {
                    controlExpr = LowerIndirectNonvirtCall(call);
                }
                else
                {
                    controlExpr = LowerDirectCall(call);
                }
                break;

            default:
                noway_assert(!"strange call type");
                break;
        }
    }

    // Indirect calls should always go through GenTreeCall::gtCallAddr and
    // should never have a control expression as well.
    assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr));

    if (call->IsTailCallViaJitHelper())
    {
        // Either controlExpr or gtCallAddr must contain the real call target.
if (controlExpr == nullptr) { assert(call->gtCallType == CT_INDIRECT); assert(call->gtCallAddr != nullptr); controlExpr = call->gtCallAddr; } controlExpr = LowerTailCallViaJitHelper(call, controlExpr); } // Check if we need to thread a newly created controlExpr into the LIR // if ((controlExpr != nullptr) && !callWasExpandedEarly) { LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr); JITDUMP("results of lowering call:\n"); DISPRANGE(controlExprRange); ContainCheckRange(controlExprRange); BlockRange().InsertBefore(call, std::move(controlExprRange)); call->gtControlExpr = controlExpr; } if (comp->opts.IsCFGEnabled()) { LowerCFGCall(call); } if (call->IsFastTailCall()) { // Lower fast tail call can introduce new temps to set up args correctly for Callee. // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding Caller stack args // and replacing them with a new temp. Control expr also can contain nodes that need // to be patched. // Therefore lower fast tail call must be done after controlExpr is inserted into LIR. // There is one side effect which is flipping the order of PME and control expression // since LowerFastTailCall calls InsertPInvokeMethodEpilog. LowerFastTailCall(call); } if (varTypeIsStruct(call)) { LowerCallStruct(call); } ContainCheckCallOperands(call); JITDUMP("lowering call (after):\n"); DISPTREERANGE(BlockRange(), call); JITDUMP("\n"); } // Inserts profiler hook, GT_PROF_HOOK for a tail call node. // // AMD64: // We need to insert this after all nested calls, but before all the arguments to this call have been set up. // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before // that. If there are no args, then it should be inserted before the call node. // // For example: // * stmtExpr void (top level) (IL 0x000...0x010) // arg0 SETUP | /--* argPlace ref REG NA $c5 // this in rcx | | /--* argPlace ref REG NA $c1 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2 // arg1 in rdx | | +--* putarg_reg ref REG NA // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80 // this in rcx | | +--* putarg_reg ref REG NA // | | /--* call nullcheck ref System.String.ToLower $c5 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? ) // | | { \--* prof_hook void REG NA // arg0 in rcx | +--* putarg_reg ref REG NA // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void // // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call. // // X86: // Insert the profiler hook immediately before the call. The profiler hook will preserve // all argument registers (ECX, EDX), but nothing else. // // Params: // callNode - tail call node // insertionPoint - if non-null, insert the profiler hook before this point. // If null, insert the profiler hook before args are setup // but after all arg side effects are computed. 
//
void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
{
    assert(call->IsTailCall());
    assert(comp->compIsProfilerHookNeeded());

#if defined(TARGET_X86)

    if (insertionPoint == nullptr)
    {
        insertionPoint = call;
    }

#else // !defined(TARGET_X86)

    if (insertionPoint == nullptr)
    {
        for (GenTreeCall::Use& use : call->Args())
        {
            assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs
            if (use.GetNode()->OperIs(GT_PUTARG_STK))
            {
                // found it
                insertionPoint = use.GetNode();
                break;
            }
        }

        if (insertionPoint == nullptr)
        {
            for (GenTreeCall::Use& use : call->LateArgs())
            {
                if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK))
                {
                    // found it
                    insertionPoint = use.GetNode();
                    break;
                }
            }

            // If there are no args, insert before the call node
            if (insertionPoint == nullptr)
            {
                insertionPoint = call;
            }
        }
    }

#endif // !defined(TARGET_X86)

    assert(insertionPoint != nullptr);
    GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
    BlockRange().InsertBefore(insertionPoint, profHookNode);
}

//------------------------------------------------------------------------
// LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog +
// jmp).
//
// Arguments:
//    call - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
//    call must be non-null.
//
// Notes:
//     For fast tail calls it is necessary to set up stack args in the incoming
//     arg stack space area. When args passed also come from this area we may
//     run into problems because we may end up overwriting the stack slot before
//     using it. For example, for foo(a, b) { return bar(b, a); }, if a and b
//     are on incoming arg stack space in foo they need to be swapped in this
//     area for the call to bar. This function detects this situation and
//     introduces a temp when an outgoing argument would overwrite a later-used
//     incoming argument.
//
//     This function also handles inserting necessary profiler hooks and pinvoke
//     method epilogs in case there are inlined pinvokes.
void Lowering::LowerFastTailCall(GenTreeCall* call)
{
#if FEATURE_FASTTAILCALL
    // Tail call restrictions, i.e. conditions under which the tail prefix is ignored.
    // Most of these checks are already done by importer or fgMorphTailCall().
    // This serves as a double sanity check.
    assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
    assert(!comp->opts.IsReversePInvoke());                  // tail calls reverse pinvoke
    assert(!call->IsUnmanaged());                            // tail calls to unmanaged methods
    assert(!comp->compLocallocUsed);                         // tail call from methods that also do localloc

#ifdef TARGET_AMD64
    assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
#endif                                         // TARGET_AMD64

    // We expect to see a call that meets the following conditions
    assert(call->IsFastTailCall());

    // VM cannot use return address hijacking when A() and B() tail call each
    // other in mutual recursion. Therefore, this block is reachable through
    // a GC-safe point or the whole method is marked as fully interruptible.
    //
    // TODO-Cleanup:
    // optReachWithoutCall() depends on the fact that loop header blocks
    // will have a block number > fgLastBB. These loop header blocks get added
    // after dominator computation and get skipped by OptReachWithoutCall().
    // The below condition cannot be asserted in lower because fgSimpleLowering()
    // can add a new basic block for range check failure which becomes
    // fgLastBB with block number > loop header block number.
    // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
    //        !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible());

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns. This is the case where the caller method has both PInvokes and tail calls.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
    }

    // Args for tail call are setup in incoming arg area. The gc-ness of args of
    // caller and callee (which is being tail called) may not match. Therefore, everything
    // from arg setup until the epilog needs to be non-interruptible by GC. This is
    // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
    // of call is setup. Note that once a stack arg is setup, it cannot have nested
    // calls subsequently in execution order to setup other args, because the nested
    // call could over-write the stack arg that is setup earlier.
    ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));

    for (GenTreeCall::Use& use : call->Args())
    {
        if (use.GetNode()->OperIs(GT_PUTARG_STK))
        {
            putargs.Push(use.GetNode());
        }
    }

    for (GenTreeCall::Use& use : call->LateArgs())
    {
        if (use.GetNode()->OperIs(GT_PUTARG_STK))
        {
            putargs.Push(use.GetNode());
        }
    }

    GenTree* startNonGCNode = nullptr;
    if (!putargs.Empty())
    {
        // Get the earliest operand of the first PUTARG_STK node. We will make
        // the required copies of args before this node.
        bool     unused;
        GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode();
        // Insert GT_START_NONGC node before we evaluate the PUTARG_STK args.
        // Note that if there are no args to be setup on stack, no need to
        // insert GT_START_NONGC node.
        startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
        BlockRange().InsertBefore(insertionPoint, startNonGCNode);

        // GC-interruptibility in the following case:
        //     foo(a, b, c, d, e) { bar(a, b, c, d, e); }
        //     bar(a, b, c, d, e) { foo(a, b, d, d, e); }
        //
        // Since the instruction group starting from the instruction that sets up first
        // stack arg to the end of the tail call is marked as non-gc interruptible,
        // this will form a non-interruptible tight loop causing gc-starvation. To fix
        // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
        // has a single basic block and is not a GC-safe point. The presence of a single
        // nop outside non-gc interruptible region will prevent gc starvation.
        if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
        {
            assert(comp->fgFirstBB == comp->compCurBB);
            GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
            BlockRange().InsertBefore(startNonGCNode, noOp);
        }

        // Since this is a fast tailcall each PUTARG_STK will place the argument in the
        // _incoming_ arg space area. This will effectively overwrite our already existing
        // incoming args that live in that area. If we have later uses of those args, this
        // is a problem. We introduce a defensive copy into a temp here of those args that
        // potentially may cause problems.
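        // A hand-drawn illustration (not an actual frame layout) of the overlap hazard
        // handled below:
        //
        //    incoming arg area:  [argA: bytes 0..7][argB: bytes 8..15]
        //
        // A PUTARG_STK writing bytes 8..15 overwrites argB, so any later use of argB
        // must first be copied into a temp (see RehomeArgForFastTailCall).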
for (int i = 0; i < putargs.Height(); i++) { GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk(); unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize(); int baseOff = -1; // Stack offset of first arg on stack for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++) { LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum); if (callerArgDsc->lvIsRegArg) { continue; } unsigned int argStart; unsigned int argEnd; #if defined(TARGET_AMD64) if (TargetOS::IsWindows) { // On Windows x64, the argument position determines the stack slot uniquely, and even the // register args take up space in the stack frame (shadow space). argStart = callerArgLclNum * TARGET_POINTER_SIZE; argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize()); } else #endif // TARGET_AMD64 { assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); if (baseOff == -1) { baseOff = callerArgDsc->GetStackOffset(); } // On all ABIs where we fast tail call the stack args should come in order. assert(baseOff <= callerArgDsc->GetStackOffset()); // Compute offset of this stack argument relative to the first stack arg. // This will be its offset into the incoming arg space area. argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff); argEnd = argStart + comp->lvaLclSize(callerArgLclNum); } // If ranges do not overlap then this PUTARG_STK will not mess up the arg. if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd)) { continue; } // Codegen cannot handle a partially overlapping copy. For // example, if we have // bar(S16 stack, S32 stack2) // foo(S32 stack, S32 stack2) { bar(..., stack) } // then we may end up having to move 'stack' in foo 16 bytes // ahead. It is possible that this PUTARG_STK is the only use, // in which case we will need to introduce a temp, so look for // uses starting from it. Note that we assume that in-place // copies are OK. GenTree* lookForUsesFrom = put->gtNext; if (overwrittenStart != argStart) { lookForUsesFrom = insertionPoint; } RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call); // The above call can introduce temps and invalidate the pointer. callerArgDsc = comp->lvaGetDesc(callerArgLclNum); // For promoted locals we have more work to do as its fields could also have been invalidated. if (!callerArgDsc->lvPromoted) { continue; } unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart; unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt; for (unsigned int j = fieldsFirst; j < fieldsEnd; j++) { RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call); } } } } // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be // inserted before the args are setup but after the side effects of args are // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC // node if one exists. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, startNonGCNode); } #else // !FEATURE_FASTTAILCALL // Platform does not implement fast tail call mechanism. This cannot be // reached because we always choose to do a tailcall via helper on those // platforms (or no tailcall at all). unreached(); #endif } // //------------------------------------------------------------------------ // RehomeArgForFastTailCall: Introduce temps for args that may be overwritten // during fast tailcall sequence. 
// // Arguments: // lclNum - the lcl num of the arg that will be overwritten. // insertTempBefore - the node at which to copy the arg into a temp. // lookForUsesStart - the node where to start scanning and replacing uses of // the arg specified by lclNum. // callNode - the call node that is being dispatched as a fast tailcall. // // Assumptions: // all args must be non-null. // // Notes: // This function scans for uses of the arg specified by lclNum starting // from the lookForUsesStart node. If it finds any uses it introduces a temp // for this argument and updates uses to use this instead. In the situation // where it introduces a temp it can thus invalidate pointers to other // locals. // void Lowering::RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode) { unsigned int tmpLclNum = BAD_VAR_NUM; for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext) { if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr()) { continue; } GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon(); if (lcl->GetLclNum() != lclNum) { continue; } // Create tmp and use it in place of callerArgDsc if (tmpLclNum == BAD_VAR_NUM) { tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable")); LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum); var_types tmpTyp = genActualType(callerArgDsc->TypeGet()); comp->lvaTable[tmpLclNum].lvType = tmpTyp; // TODO-CQ: I don't see why we should copy doNotEnreg. comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister; #ifdef DEBUG comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason()); #endif // DEBUG GenTree* value = comp->gtNewLclvNode(lclNum, tmpTyp); if (tmpTyp == TYP_STRUCT) { comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false); } GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value); BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar)); ContainCheckRange(value, storeLclVar); LowerNode(storeLclVar); } lcl->SetLclNum(tmpLclNum); } } //------------------------------------------------------------------------ // LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph // has already inserted tailcall helper special arguments. This function inserts // actual data for some placeholders. This function is only used on x86. // // Lower // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg) // as // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention. // // Also inserts PInvoke method epilog if required. // // Arguments: // call - The call node // callTarget - The real call target. This is used to replace the dummyArg during lowering. // // Return Value: // Returns control expression tree for making a call to helper Jit_TailCall. // GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget) { // Tail call restrictions i.e. conditions under which tail prefix is ignored. // Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. 
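    // (Rough shape of the rewrite, paraphrasing the header comment above: the four
    //  special stack args set up by morph - callTarget, flags, numberOfNewStackArgsWords,
    //  numberOfOldStackArgsWords - get their real values patched in below, and the call
    //  is then retargeted to CORINFO_HELP_TAILCALL.)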
    assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
    assert(!call->IsUnmanaged());                            // tail calls to unmanaged methods
    assert(!comp->compLocallocUsed);                         // tail call from methods that also do localloc

    // We expect to see a call that meets the following conditions
    assert(call->IsTailCallViaJitHelper());
    assert(callTarget != nullptr);

    // The TailCall helper call never returns to the caller and is not GC interruptible.
    // Therefore the block containing the tail call should be a GC safe point to avoid
    // GC starvation. It is legal for the block to be unmarked iff the entry block is a
    // GC safe point, as the entry block trivially dominates every reachable block.
    assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns. This is the case where the caller method has both PInvokes and tail calls.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
    }

    // Remove gtCallAddr from execution order if present.
    if (call->gtCallType == CT_INDIRECT)
    {
        assert(call->gtCallAddr != nullptr);

        bool               isClosed;
        LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
        assert(isClosed);

        BlockRange().Remove(std::move(callAddrRange));
    }

    // The callTarget tree needs to be sequenced.
    LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);

    // Verify the special args are what we expect, and replace the dummy args with real values.

    // We need to figure out the size of the outgoing stack arguments, not including the special args.
    // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
    // This number is exactly the next slot number in the call's argument info struct.
    unsigned  nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset();
    const int wordSize         = 4;
    unsigned  nNewStkArgsWords = nNewStkArgsBytes / wordSize;
    DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords);
    assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
    nNewStkArgsWords -= 4;

    unsigned numArgs = call->fgArgInfo->ArgCount();

    fgArgTabEntry* argEntry;

    // arg 0 == callTarget.
    argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
    assert(argEntry != nullptr);
    GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();

    ContainCheckRange(callTargetRange);
    BlockRange().InsertAfter(arg0, std::move(callTargetRange));

    bool               isClosed;
    LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
    assert(isClosed);

    BlockRange().Remove(std::move(secondArgRange));

    argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget;

    // arg 1 == flags
    argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
    assert(argEntry != nullptr);
    GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
    assert(arg1->gtOper == GT_CNS_INT);

    ssize_t tailCallHelperFlags = 1 |                                  // always restore EDI,ESI,EBX
                                  (call->IsVirtualStub() ?
0x2 : 0x0); // Stub dispatch flag arg1->AsIntCon()->gtIconVal = tailCallHelperFlags; // arg 2 == numberOfNewStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3); assert(argEntry != nullptr); GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg2->gtOper == GT_CNS_INT); arg2->AsIntCon()->gtIconVal = nNewStkArgsWords; #ifdef DEBUG // arg 3 == numberOfOldStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4); assert(argEntry != nullptr); GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg3->gtOper == GT_CNS_INT); #endif // DEBUG // Transform this call node into a call to Jit tail call helper. call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; // Lower this as if it were a pure helper call. call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER); GenTree* result = LowerDirectCall(call); // Now add back tail call flags for identifying this node as tail call dispatched via helper. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; #ifdef PROFILING_SUPPORTED // Insert profiler tail call hook if needed. // Since we don't know the insertion point, pass null for second param. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, nullptr); } #endif // PROFILING_SUPPORTED return result; } //------------------------------------------------------------------------ // LowerCFGCall: Potentially lower a call to use control-flow guard. This // expands indirect calls into either a validate+call sequence or to a dispatch // helper taking the original target in a special register. // // Arguments: // call - The call node // void Lowering::LowerCFGCall(GenTreeCall* call) { assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL)); if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { return; } GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr; if ((callTarget == nullptr) || callTarget->IsIntegralConst()) { // This is a direct call, no CFG check is necessary. return; } CFGCallKind cfgKind = call->GetCFGCallKind(); switch (cfgKind) { case CFGCallKind::ValidateAndCall: { // To safely apply CFG we need to generate a very specific pattern: // in particular, it is a safety issue to allow the JIT to reload // the call target from memory between calling // CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is // something that would easily occur in debug codegen if we // produced high-level IR. Instead we will use a GT_PHYSREG node // to get the target back from the register that contains the target. // // Additionally, the validator does not preserve all arg registers, // so we have to move all GT_PUTARG_REG nodes that would otherwise // be trashed ahead. The JIT also has an internal invariant that // once GT_PUTARG nodes start to appear in LIR, the call is coming // up. To avoid breaking this invariant we move _all_ GT_PUTARG // nodes (in particular, GC info reporting relies on this). // // To sum up, we end up transforming // // ta... = <early args> // tb... = <late args> // tc = callTarget // GT_CALL tc, ta..., tb... // // into // // ta... = <early args> (without GT_PUTARG_* nodes) // tb = callTarget // GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb // tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper) // td = <moved GT_PUTARG_* nodes> // GT_CALL tb, ta..., td.. 
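            // (The GT_PHYSREG read below is what prevents re-loading the target from
            //  memory between validation and the call; a re-load would let a racing
            //  write substitute an unvalidated address.)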
// GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL); LIR::Use useOfTar; bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(regNode); GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet()); // Add the call to the validator. Use a placeholder for the target while we // morph, sequence and lower, to avoid redoing that for the actual target. GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder); GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args); comp->fgMorphTree(validate); LIR::Range validateRange = LIR::SeqTree(comp, validate); GenTree* validateFirst = validateRange.FirstNode(); GenTree* validateLast = validateRange.LastNode(); // Insert the validator with the call target before the late args. BlockRange().InsertBefore(call, std::move(validateRange)); // Swap out the target gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(callTarget); targetPlaceholder->SetUnusedValue(); LowerRange(validateFirst, validateLast); // Insert the PHYSREG node that we must load right after validation. BlockRange().InsertAfter(validate, regNode); LowerNode(regNode); // Finally move all GT_PUTARG_* nodes for (GenTreeCall::Use& use : call->Args()) { GenTree* node = use.GetNode(); if (!node->IsValue()) { // Non-value nodes in early args are setup nodes for late args. continue; } assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } for (GenTreeCall::Use& use : call->LateArgs()) { GenTree* node = use.GetNode(); assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } break; } case CFGCallKind::Dispatch: { #ifdef REG_DISPATCH_INDIRECT_CALL_ADDR // Now insert the call target as an extra argument. // // First append the early placeholder arg GenTreeCall::Use** earlySlot = &call->gtCallArgs; unsigned int index = call->gtCallThisArg != nullptr ? 
1 : 0; while (*earlySlot != nullptr) { earlySlot = &(*earlySlot)->NextRef(); index++; } assert(index == call->fgArgInfo->ArgCount()); GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE); placeHolder->gtFlags |= GTF_LATE_ARG; *earlySlot = comp->gtNewCallArgs(placeHolder); // Append the late actual arg GenTreeCall::Use** lateSlot = &call->gtCallLateArgs; unsigned int lateIndex = 0; while (*lateSlot != nullptr) { lateSlot = &(*lateSlot)->NextRef(); lateIndex++; } *lateSlot = comp->gtNewCallArgs(callTarget); // Add an entry into the arg info regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR; unsigned numRegs = 1; unsigned byteSize = TARGET_POINTER_SIZE; unsigned byteAlignment = TARGET_POINTER_SIZE; bool isStruct = false; bool isFloatHfa = false; bool isVararg = false; fgArgTabEntry* entry = call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); entry->lateUse = *lateSlot; entry->SetLateArgInx(lateIndex); // Lower the newly added args now that call is updated LowerArg(call, &(*earlySlot)->NodeRef()); LowerArg(call, &(*lateSlot)->NodeRef()); // Finally update the call to be a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif // Now relower the call target call->gtControlExpr = LowerDirectCall(call); if (call->gtControlExpr != nullptr) { LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr); ContainCheckRange(dispatchControlExprRange); BlockRange().InsertBefore(call, std::move(dispatchControlExprRange)); } #else assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher"); #endif break; } default: unreached(); } } //------------------------------------------------------------------------ // IsInvariantInRange: Check if a node is invariant in the specified range. In // other words, can 'node' be moved to right before 'endExclusive' without its // computation changing values? // // Arguments: // node - The node. // endExclusive - The exclusive end of the range to check invariance for. // bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive) { assert(node->Precedes(endExclusive)); if (node->IsInvariant()) { return true; } if (!node->IsValue()) { return false; } if (node->OperIsLocal()) { GenTreeLclVarCommon* lcl = node->AsLclVarCommon(); LclVarDsc* desc = comp->lvaGetDesc(lcl); if (desc->IsAddressExposed()) { return false; } // Currently, non-address exposed locals have the property that their // use occurs at the user, so no further interference check is // necessary. return true; } return false; } //------------------------------------------------------------------------ // MoveCFGCallArg: Given a call that will be CFG transformed using the // validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node, // move that node right before the call. // // Arguments: // call - The call that is being CFG transformed // node - The argument node // // Remarks: // We can always move the GT_PUTARG_* node further ahead as the side-effects // of these nodes are handled by LSRA. 
However, the operands of these nodes // are not always safe to move further ahead; for invariant operands, we // move them ahead as well to shorten the lifetime of these values. // void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node) { assert(node->OperIsPutArg() || node->OperIsFieldList()); if (node->OperIsFieldList()) { JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n"); for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses()) { assert(operand.GetNode()->OperIsPutArg()); MoveCFGCallArg(call, operand.GetNode()); } } else { GenTree* operand = node->AsOp()->gtGetOp1(); JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n"); DISPTREE(operand); if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call)) { JITDUMP("...yes, moving to after validator call\n"); BlockRange().Remove(operand); BlockRange().InsertBefore(call, operand); } else { JITDUMP("...no, operand has side effects or is not invariant\n"); } } JITDUMP("Moving\n"); DISPTREE(node); JITDUMP("\n"); BlockRange().Remove(node); BlockRange().InsertBefore(call, node); } #ifndef TARGET_64BIT //------------------------------------------------------------------------ // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // // Notes: // This is done during lowering because DecomposeLongs handles only nodes // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values // but produce TYP_INT values. // GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) { assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG); GenTree* src1 = cmp->gtGetOp1(); GenTree* src2 = cmp->gtGetOp2(); assert(src1->OperIs(GT_LONG)); assert(src2->OperIs(GT_LONG)); GenTree* loSrc1 = src1->gtGetOp1(); GenTree* hiSrc1 = src1->gtGetOp2(); GenTree* loSrc2 = src2->gtGetOp1(); GenTree* hiSrc2 = src2->gtGetOp2(); BlockRange().Remove(src1); BlockRange().Remove(src2); genTreeOps condition = cmp->OperGet(); GenTree* loCmp; GenTree* hiCmp; if (cmp->OperIs(GT_EQ, GT_NE)) { // // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction. // // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when // the first happens to be a constant. Usually only the second compare operand is a constant but it's // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast // then hiSrc1 would be 0. 
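        // As a hand-written illustration (not an actual JIT dump), x NE 0 for a
        // TYP_LONG x reduces to:
        //     t = x.lo OR x.hi    ; the OR sets the zero flag
        //     SETCC|JCC NE        ; consumes the flags, no explicit compare emitted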
        //
        if (loSrc1->OperIs(GT_CNS_INT))
        {
            std::swap(loSrc1, loSrc2);
        }

        if (loSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(loSrc2);
            loCmp = loSrc1;
        }
        else
        {
            loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
            BlockRange().InsertBefore(cmp, loCmp);
            ContainCheckBinary(loCmp->AsOp());
        }

        if (hiSrc1->OperIs(GT_CNS_INT))
        {
            std::swap(hiSrc1, hiSrc2);
        }

        if (hiSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(hiSrc2);
            hiCmp = hiSrc1;
        }
        else
        {
            hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
            BlockRange().InsertBefore(cmp, hiCmp);
            ContainCheckBinary(hiCmp->AsOp());
        }

        hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
        BlockRange().InsertBefore(cmp, hiCmp);
        ContainCheckBinary(hiCmp->AsOp());
    }
    else
    {
        assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));

        //
        // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
        // If the compare is unsigned we can still use SUB but we need to check the Carry flag,
        // not the actual result. In both cases we can simply check the appropriate condition flags
        // and ignore the actual result:
        //     SUB_LO loSrc1, loSrc2
        //     SUB_HI hiSrc1, hiSrc2
        //     SETCC|JCC (signed|unsigned LT|GE)
        // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
        // be turned into a CMP because the first SUB would have set carry to 0. This effectively
        // transforms a long compare against 0 into an int compare of the high part against 0.
        //
        // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
        // is greater than 0 is not so easy. We need to turn this into a positive/negative check
        // like the one we get for LT|GE compares, this can be achieved by swapping the compare:
        //     (x LE|GT y) becomes (y GE|LT x)
        //
        // Having to swap operands is problematic when the second operand is a constant. The constant
        // moves to the first operand where it cannot be contained and thus needs a register. This can
        // be avoided by changing the constant such that LE|GT becomes LT|GE:
        //     (x LE|GT 41) becomes (x LT|GE 42)
        //

        if (cmp->OperIs(GT_LE, GT_GT))
        {
            bool mustSwap = true;

            if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
            {
                uint32_t loValue  = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
                uint32_t hiValue  = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
                uint64_t value    = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
                uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;

                if (value != maxValue)
                {
                    value++;
                    loValue = value & UINT32_MAX;
                    hiValue = (value >> 32) & UINT32_MAX;
                    loSrc2->AsIntCon()->SetIconValue(loValue);
                    hiSrc2->AsIntCon()->SetIconValue(hiValue);

                    condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
                    mustSwap  = false;
                }
            }

            if (mustSwap)
            {
                std::swap(loSrc1, loSrc2);
                std::swap(hiSrc1, hiSrc2);
                condition = GenTree::SwapRelop(condition);
            }
        }

        assert((condition == GT_LT) || (condition == GT_GE));

        if (loSrc2->IsIntegralConst(0))
        {
            BlockRange().Remove(loSrc2);

            // Very conservative dead code removal... but it helps.
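            // (Only trivially side-effect free operands - constants and local reads -
            //  are removed outright; anything else is merely marked unused so that
            //  liveness can clean it up.)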
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(loSrc1); } else { loSrc1->SetUnusedValue(); } hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckCompare(hiCmp->AsOp()); } else { loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2); hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, loCmp, hiCmp); ContainCheckCompare(loCmp->AsOp()); ContainCheckBinary(hiCmp->AsOp()); // // Try to move the first SUB_HI operands right in front of it, this allows using // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do // this only for locals as they won't change condition flags. Note that we could // move constants (except 0 which generates XOR reg, reg) but it's extremely rare // to have a constant as the first operand. // if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(hiSrc1); BlockRange().InsertBefore(hiCmp, hiSrc1); } } } hiCmp->gtFlags |= GTF_SET_FLAGS; if (hiCmp->IsValue()) { hiCmp->SetUnusedValue(); } LIR::Use cmpUse; if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { BlockRange().Remove(cmp); GenTree* jcc = cmpUse.User(); jcc->AsOp()->gtOp1 = nullptr; jcc->ChangeOper(GT_JCC); jcc->gtFlags |= GTF_USE_FLAGS; jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } else { cmp->AsOp()->gtOp1 = nullptr; cmp->AsOp()->gtOp2 = nullptr; cmp->ChangeOper(GT_SETCC); cmp->gtFlags |= GTF_USE_FLAGS; cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } return cmp->gtNext; } #endif // !TARGET_64BIT //------------------------------------------------------------------------ // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations. // // Arguments: // cmp - the compare node // // Return Value: // The original compare node if lowering should proceed as usual or the next node // to lower if the compare node was changed in such a way that lowering is no // longer needed. // // Notes: // - Narrow operands to enable memory operand containment (XARCH specific). // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added). // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific) // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the // condition flags appropriately (XARCH/ARM64 specific but could be extended // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS). // GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(cmp->gtGetOp2()->IsIntegralConst()); #if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* op1 = cmp->gtGetOp1(); GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon(); ssize_t op2Value = op2->IconValue(); #ifdef TARGET_XARCH var_types op1Type = op1->TypeGet(); if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value)) { // // If op1's type is small then try to narrow op2 so it has the same type as op1. // Small types are usually used by memory loads and if both compare operands have // the same type then the memory load can be contained. In certain situations // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding. 
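        // A hand-written sketch (not an actual JIT dump) of the narrowing:
        //     before: EQ(IND<ubyte>(addr), CNS_INT<int>(200))   ; load would be widened
        //     after:  EQ(IND<ubyte>(addr), CNS_INT<ubyte>(200)) ; load can be contained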
        //
        op2->gtType = op1Type;
    }
    else
#endif
        if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
    {
        GenTreeCast* cast       = op1->AsCast();
        var_types    castToType = cast->CastToType();
        GenTree*     castOp     = cast->gtGetOp1();

        if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
        {
            //
            // Since we're going to remove the cast we need to be able to narrow the cast operand
            // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
            // Some opers just can't be narrowed (e.g DIV, MUL) while other could be narrowed but
            // doing so would produce incorrect results (e.g. RSZ, RSH).
            //
            // The below list of handled opers is conservative but enough to handle the most common
            // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
            // the result of bool returning calls.
            //
            bool removeCast =
#ifdef TARGET_ARM64
                (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
#endif
                (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND)
#ifdef TARGET_XARCH
                 || IsContainableMemoryOp(castOp)
#endif
                     );

            if (removeCast)
            {
                assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation

#ifdef TARGET_ARM64
                bool cmpEq = cmp->OperIs(GT_EQ);

                cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
                op2->SetIconValue(0xff);
                op2->gtType = castOp->gtType;
#else
                castOp->gtType = castToType;
                op2->gtType    = castToType;
#endif
                // If we have any contained memory ops on castOp, they must now not be contained.
                if (castOp->OperIs(GT_OR, GT_XOR, GT_AND))
                {
                    GenTree* op1 = castOp->gtGetOp1();
                    if ((op1 != nullptr) && !op1->IsCnsIntOrI())
                    {
                        op1->ClearContained();
                    }

                    GenTree* op2 = castOp->gtGetOp2();
                    if ((op2 != nullptr) && !op2->IsCnsIntOrI())
                    {
                        op2->ClearContained();
                    }
                }

                cmp->AsOp()->gtOp1 = castOp;

                BlockRange().Remove(cast);
            }
        }
    }
    else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
    {
        //
        // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
        //

        GenTree* andOp1 = op1->gtGetOp1();
        GenTree* andOp2 = op1->gtGetOp2();

        if (op2Value != 0)
        {
            //
            // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
            // into ((x AND mask) NE|EQ 0) when mask is a single bit.
            //
            if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
            {
                op2Value = 0;
                op2->SetIconValue(0);
                cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
            }
        }

        if (op2Value == 0)
        {
            BlockRange().Remove(op1);
            BlockRange().Remove(op2);

            cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
            cmp->AsOp()->gtOp1 = andOp1;
            cmp->AsOp()->gtOp2 = andOp2;
            // We will re-evaluate containment below
            andOp1->ClearContained();
            andOp2->ClearContained();

#ifdef TARGET_XARCH
            if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
            {
                //
                // For "test" we only care about the bits that are set in the second operand (mask).
                // If the mask fits in a small type then we can narrow both operands to generate a "test"
                // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
                // a widening load in some cases.
                //
                // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
                // the behavior of a previous implementation and avoids adding more cases where we generate
                // 16 bit instructions that require a length changing prefix (0x66). These suffer from
                // significant decoder stalls on Intel CPUs.
                //
                // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
                // In such cases morph narrows down the existing GT_AND by inserting a cast between it and
                // the memory operand so we'd need to add more code to recognize and eliminate that cast.
                //

                size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());

                if (FitsIn<UINT8>(mask))
                {
                    andOp1->gtType = TYP_UBYTE;
                    andOp2->gtType = TYP_UBYTE;
                }
                else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
                {
                    andOp1->gtType = TYP_USHORT;
                    andOp2->gtType = TYP_USHORT;
                }
            }
#endif
        }
    }

    if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
    {
#ifdef TARGET_XARCH
        //
        // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
        // results in smaller and faster code. It also doesn't have special register
        // requirements, unlike LSH that requires the shift count to be in ECX.
        // Note that BT has the same behavior as LSH when the bit index exceeds the
        // operand bit size - it uses (bit_index MOD bit_size).
        //

        GenTree* lsh = cmp->gtGetOp2();
        LIR::Use cmpUse;

        if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
            BlockRange().TryGetUse(cmp, &cmpUse))
        {
            GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;

            cmp->SetOper(GT_BT);
            cmp->gtType = TYP_VOID;
            cmp->gtFlags |= GTF_SET_FLAGS;
            cmp->AsOp()->gtOp2 = lsh->gtGetOp2();
            cmp->gtGetOp2()->ClearContained();

            BlockRange().Remove(lsh->gtGetOp1());
            BlockRange().Remove(lsh);

            GenTreeCC* cc;

            if (cmpUse.User()->OperIs(GT_JTRUE))
            {
                cmpUse.User()->ChangeOper(GT_JCC);
                cc              = cmpUse.User()->AsCC();
                cc->gtCondition = condition;
            }
            else
            {
                cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
                BlockRange().InsertAfter(cmp, cc);
                cmpUse.ReplaceWith(cc);
            }

            cc->gtFlags |= GTF_USE_FLAGS;

            return cmp->gtNext;
        }
#endif // TARGET_XARCH
    }
    else if (cmp->OperIs(GT_EQ, GT_NE))
    {
        GenTree* op1 = cmp->gtGetOp1();
        GenTree* op2 = cmp->gtGetOp2();

        // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
        // cases because in the majority of cases op1, op2 and cmp would be in that order in
        // execution. In general we should be able to check that all the nodes that come
        // after op1 do not modify the flags so that it is safe to avoid generating a
        // test instruction.

        if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
#ifdef TARGET_XARCH
            (op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG)
#ifdef FEATURE_HW_INTRINSICS
             || (op1->OperIs(GT_HWINTRINSIC) &&
                 emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic())))
#endif // FEATURE_HW_INTRINSICS
                 )
#else // TARGET_ARM64
            op1->OperIs(GT_AND, GT_ADD, GT_SUB)
#endif
                )
        {
            op1->gtFlags |= GTF_SET_FLAGS;
            op1->SetUnusedValue();

            BlockRange().Remove(op2);

            GenTree*   next = cmp->gtNext;
            GenTree*   cc;
            genTreeOps ccOp;
            LIR::Use   cmpUse;

            // Fast check for the common case - relop used by a JTRUE that immediately follows it.
            if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
            {
                cc   = next;
                ccOp = GT_JCC;
                next = nullptr;
                BlockRange().Remove(cmp);
            }
            else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
            {
                cc   = cmpUse.User();
                ccOp = GT_JCC;
                next = nullptr;
                BlockRange().Remove(cmp);
            }
            else // The relop is not used by a JTRUE or it is not used at all.
            {
                // Transform the relop node into a SETCC. If it's not used we could remove
                // it completely but that means doing more work to handle a rare case.
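                // e.g. (hand-written sketch) for ADD(x, y) EQ 0 this peephole yields:
                //     ADD x, y      ; GTF_SET_FLAGS set, value unused
                //     SETCC E       ; materializes the boolean from the flags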
                cc   = cmp;
                ccOp = GT_SETCC;
            }

            GenCondition condition = GenCondition::FromIntegralRelop(cmp);
            cc->ChangeOper(ccOp);
            cc->AsCC()->gtCondition = condition;
            cc->gtFlags |= GTF_USE_FLAGS;

            return next;
        }
    }
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)

    return cmp;
}

//------------------------------------------------------------------------
// Lowering::LowerCompare: Lowers a compare node.
//
// Arguments:
//    cmp - the compare node
//
// Return Value:
//    The next node to lower.
//
GenTree* Lowering::LowerCompare(GenTree* cmp)
{
#ifndef TARGET_64BIT
    if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
    {
        return DecomposeLongCompare(cmp);
    }
#endif

    if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
    {
        GenTree* next = OptimizeConstCompare(cmp);

        // If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
        if (next != cmp)
        {
            return next;
        }
    }

#ifdef TARGET_XARCH
    if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
    {
        if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
        {
            //
            // If both operands have the same type then codegen will use the common operand type to
            // determine the instruction type. For small types this would result in performing a
            // signed comparison of two small unsigned values without zero extending them to TYP_INT
            // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
            // has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
            //

            cmp->gtFlags |= GTF_UNSIGNED;
        }
    }
#endif // TARGET_XARCH
    ContainCheckCompare(cmp->AsOp());
    return cmp->gtNext;
}

//------------------------------------------------------------------------
// Lowering::LowerJTrue: Lowers a JTRUE node.
//
// Arguments:
//    jtrue - the JTRUE node
//
// Return Value:
//    The next node to lower (usually nullptr).
//
// Notes:
//    On ARM64 this may remove the JTRUE node and transform its associated
//    relop into a JCMP node.
//
GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
{
#ifdef TARGET_ARM64
    GenTree* relop    = jtrue->gtGetOp1();
    GenTree* relopOp2 = relop->AsOp()->gtGetOp2();

    if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
    {
        bool         useJCMP = false;
        GenTreeFlags flags   = GTF_EMPTY;

        if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
        {
            // Codegen will use cbz or cbnz, which do not affect the flag register.
            flags   = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
            useJCMP = true;
        }
        else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
        {
            // Codegen will use tbz or tbnz, which do not affect the flag register.
            flags   = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY);
            useJCMP = true;
        }

        if (useJCMP)
        {
            relop->SetOper(GT_JCMP);
            relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
            relop->gtFlags |= flags;
            relop->gtType = TYP_VOID;

            relopOp2->SetContained();

            BlockRange().Remove(jtrue);

            assert(relop->gtNext == nullptr);
            return nullptr;
        }
    }
#endif // TARGET_ARM64

    ContainCheckJTrue(jtrue);

    assert(jtrue->gtNext == nullptr);
    return nullptr;
}

//----------------------------------------------------------------------------------------------
// LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags.
//
// Arguments:
//     node - The node to lower
//     condition - The condition code of the generated SETCC/JCC node
//
// Return Value:
//     A SETCC/JCC node or nullptr if `node` is not used.
//
// Notes:
//     This simply replaces `node`'s use with an appropriate SETCC/JCC node,
//     `node` is not actually changed, except by having its GTF_SET_FLAGS set.
//     It's the caller's responsibility to change `node` such that it only
//     sets the condition flags, without producing a boolean value.
//
GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition)
{
    // Skip over a chain of EQ/NE(x, 0) relops. This may be present either
    // because `node` is not a relop and so it cannot be used directly by a
    // JTRUE, or because the frontend failed to remove a EQ/NE(x, 0) that's
    // used as logical negation.
    //
    // Usually there's only one such relop but there's little difference
    // between removing one or all so we may as well remove them all.
    //
    // We can't allow any other nodes between `node` and its user because we
    // have no way of knowing if those nodes change flags or not. So we're looking
    // to skip over a sequence of appropriately connected zero and EQ/NE nodes.

    // The x in EQ/NE(x, 0)
    GenTree* relop = node;
    // The first node of the relop sequence
    GenTree* first = node->gtNext;
    // The node following the relop sequence
    GenTree* next = first;

    while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) &&
           next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) &&
           (next->gtNext->AsOp()->gtGetOp2() == next))
    {
        relop = next->gtNext;
        next  = relop->gtNext;

        if (relop->OperIs(GT_EQ))
        {
            condition = GenCondition::Reverse(condition);
        }
    }

    GenTreeCC* cc = nullptr;

    // Next may be null if `node` is not used. In that case we don't need to generate a SETCC node.
    if (next != nullptr)
    {
        if (next->OperIs(GT_JTRUE))
        {
            // If the instruction immediately following 'relop', i.e. 'next' is a conditional branch,
            // it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly
            // constructed IL (the setting of a condition code should always immediately precede its
            // use, since the JIT doesn't track dataflow for condition codes). Still, if it happens
            // it's not our problem, it simply means that `node` is not used and can be removed.
            if (next->AsUnOp()->gtGetOp1() == relop)
            {
                assert(relop->OperIsCompare());

                next->ChangeOper(GT_JCC);
                cc              = next->AsCC();
                cc->gtCondition = condition;
            }
        }
        else
        {
            // If the node is used by something other than a JTRUE then we need to insert a
            // SETCC node to materialize the boolean value.
            LIR::Use use;

            if (BlockRange().TryGetUse(relop, &use))
            {
                cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
                BlockRange().InsertAfter(node, cc);
                use.ReplaceWith(cc);
            }
        }
    }

    if (cc != nullptr)
    {
        node->gtFlags |= GTF_SET_FLAGS;
        cc->gtFlags |= GTF_USE_FLAGS;
    }

    // Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was
    // inserted after `node`, `first` still points to the node that was initially
    // after `node`.
    if (relop != node)
    {
        BlockRange().Remove(first, relop);
    }

    return cc;
}

// Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
void Lowering::LowerJmpMethod(GenTree* jmp)
{
    assert(jmp->OperGet() == GT_JMP);

    JITDUMP("lowering GT_JMP\n");
    DISPNODE(jmp);
    JITDUMP("============");

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
    }
}

// Lower GT_RETURN node to insert PInvoke method epilog if required.
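// (Besides the PInvoke epilog, LowerRet also performs return-value retyping: a bitcast
//  for float<->integer register mismatches, and struct returns are handled via
//  LowerRetStruct / LowerRetSingleRegStructLclVar below.)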
void Lowering::LowerRet(GenTreeUnOp* ret)
{
    assert(ret->OperGet() == GT_RETURN);

    JITDUMP("lowering GT_RETURN\n");
    DISPNODE(ret);
    JITDUMP("============");

    GenTree* retVal = ret->gtGetOp1();
    // There are two kinds of retyping:
    // - A simple bitcast can be inserted when:
    //   - We're returning a floating type as an integral type or vice-versa, or
    // - If we're returning a struct as a primitive type, we change the type of
    // 'retval' in 'LowerRetSingleRegStructLclVar()'
    bool needBitcast =
        (ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1()));
    bool doPrimitiveBitcast = false;
    if (needBitcast)
    {
        doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
    }

    if (doPrimitiveBitcast)
    {
        // Add a simple bitcast when both types are not structs.
        // If one type is a struct it will be handled below.
#if defined(DEBUG)
        assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
#endif

        GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
        ret->gtOp1       = bitcast;
        BlockRange().InsertBefore(ret, bitcast);
        ContainCheckBitCast(bitcast);
    }
    else if (ret->TypeGet() != TYP_VOID)
    {
#if FEATURE_MULTIREG_RET
        if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal))
        {
            ReturnTypeDesc retTypeDesc;
            LclVarDsc*     varDsc = nullptr;
            varDsc                = comp->lvaGetDesc(retVal->AsLclVar());
            retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv);
            if (retTypeDesc.GetReturnRegCount() > 1)
            {
                CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
            }
        }
#endif // FEATURE_MULTIREG_RET

#ifdef DEBUG
        if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
        {
            if (varTypeIsStruct(ret->TypeGet()))
            {
                assert(comp->info.compRetNativeType != TYP_STRUCT);

                var_types retActualType    = genActualType(comp->info.compRetNativeType);
                var_types retValActualType = genActualType(retVal->TypeGet());

                bool constStructInit                  = retVal->IsConstInitVal();
                bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType));

                // This could happen if we have retyped op1 as a primitive type during struct promotion,
                // check `retypedFieldsMap` for details.
                bool actualTypesMatch = (retActualType == retValActualType);

                assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize);
            }
        }
#endif // DEBUG

        if (varTypeIsStruct(ret))
        {
            LowerRetStruct(ret);
        }
        else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal))
        {
            // Return struct as a primitive using Unsafe cast.
            assert(retVal->OperIs(GT_LCL_VAR));
            LowerRetSingleRegStructLclVar(ret);
        }
    }

    // A method doing PInvokes has exactly one return block unless it has tail calls.
    if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
    }
    ContainCheckRet(ret);
}

//----------------------------------------------------------------------------------------------
// LowerStoreLocCommon: platform independent part of local var or field store lowering.
//
// Arguments:
//     lclStore - The store lcl node to lower.
// void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) { assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR)); JITDUMP("lowering store lcl var/field (before):\n"); DISPTREERANGE(BlockRange(), lclStore); JITDUMP("\n"); GenTree* src = lclStore->gtGetOp1(); LclVarDsc* varDsc = comp->lvaGetDesc(lclStore); const bool srcIsMultiReg = src->IsMultiRegNode(); const bool dstIsMultiReg = lclStore->IsMultiRegLclVar(); if (!dstIsMultiReg && varTypeIsStruct(varDsc)) { // TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`, // but we do not set it for `CSE` vars so it is currently failing. assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted); if (varDsc->CanBeReplacedWithItsField(comp)) { assert(varDsc->lvFieldCnt == 1); unsigned fldNum = varDsc->lvFieldLclStart; LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store " "from a call [%06u]\n", lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore)); lclStore->SetLclNum(fldNum); lclStore->ChangeType(fldDsc->TypeGet()); varDsc = fldDsc; } } if (srcIsMultiReg || dstIsMultiReg) { const ReturnTypeDesc* retTypeDesc = nullptr; if (src->OperIs(GT_CALL)) { retTypeDesc = src->AsCall()->GetReturnTypeDesc(); } CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc); } const var_types lclRegType = varDsc->GetRegisterType(lclStore); if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg) { bool convertToStoreObj; if (src->OperGet() == GT_CALL) { GenTreeCall* call = src->AsCall(); const ClassLayout* layout = varDsc->GetLayout(); #ifdef DEBUG const unsigned slotCount = layout->GetSlotCount(); #if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI) // Windows x64 doesn't have multireg returns, // x86 uses it only for long return type, not for structs. assert(slotCount == 1); assert(lclRegType != TYP_UNDEF); #else // !TARGET_XARCH || UNIX_AMD64_ABI if (!varDsc->lvIsHfa()) { if (slotCount > 1) { assert(call->HasMultiRegRetVal()); } else { unsigned size = layout->GetSize(); assert((size <= 8) || (size == 16)); bool isPowerOf2 = (((size - 1) & size) == 0); bool isTypeDefined = (lclRegType != TYP_UNDEF); assert(isPowerOf2 == isTypeDefined); } } #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // DEBUG #if !defined(WINDOWS_AMD64_ABI) if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF)) { // If we have a single return register, // but we can't retype it as a primitive type, we must spill it. GenTreeLclVar* spilledCall = SpillStructCallResult(call); lclStore->gtOp1 = spilledCall; src = lclStore->gtOp1; JITDUMP("lowering store lcl var/field has to spill call src.\n"); LowerStoreLocCommon(lclStore); return; } #endif // !WINDOWS_AMD64_ABI convertToStoreObj = false; } else if (!varDsc->IsEnregisterableType()) { convertToStoreObj = true; } else if (src->OperIs(GT_CNS_INT)) { assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init."); #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclRegType)) { CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Lie about the type if we don't know/have it. 
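                    // (CORINFO_TYPE_FLOAT is just a stand-in: for a zero initialization
                    //  the element type does not change the bit pattern of the zero
                    //  vector created below.)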
                    simdBaseJitType = CORINFO_TYPE_FLOAT;
                }

                GenTreeSIMD* simdTree = comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType,
                                                            varDsc->lvExactSize);
                BlockRange().InsertAfter(src, simdTree);
                LowerSIMD(simdTree);
                src               = simdTree;
                lclStore->gtOp1   = src;
                convertToStoreObj = false;
            }
            else
#endif // FEATURE_SIMD
            {
                convertToStoreObj = false;
            }
        }
        else if (!src->OperIs(GT_LCL_VAR))
        {
            convertToStoreObj = true;
        }
        else
        {
            assert(src->OperIs(GT_LCL_VAR));
            convertToStoreObj = false;
        }

        if (convertToStoreObj)
        {
            const unsigned lclNum = lclStore->GetLclNum();
            GenTreeLclVar* addr   = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF);
            comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp));

            addr->gtFlags |= GTF_VAR_DEF;
            assert(!addr->IsPartialLclFld(comp));
            addr->gtFlags |= GTF_DONT_CSE;

            // Create the assignment node.
            lclStore->ChangeOper(GT_STORE_OBJ);
            GenTreeBlk* objStore = lclStore->AsObj();
            // Only the GTF_LATE_ARG flag (if present) is preserved.
            objStore->gtFlags &= GTF_LATE_ARG;
            objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
#ifndef JIT32_GCENCODER
            objStore->gtBlkOpGcUnsafe = false;
#endif
            objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
            objStore->SetLayout(varDsc->GetLayout());
            objStore->SetAddr(addr);
            objStore->SetData(src);
            BlockRange().InsertBefore(objStore, addr);
            LowerBlockStoreCommon(objStore);
            return;
        }
    }

    // src and dst can be in registers, check if we need a bitcast.
    if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src)))
    {
        assert(!srcIsMultiReg && !dstIsMultiReg);
        assert(lclStore->OperIsLocalStore());
        assert(lclRegType != TYP_UNDEF);

        GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src);
        lclStore->gtOp1  = bitcast;
        src              = lclStore->gtGetOp1();
        BlockRange().InsertBefore(lclStore, bitcast);
        ContainCheckBitCast(bitcast);
    }

    LowerStoreLoc(lclStore);
    JITDUMP("lowering store lcl var/field (after):\n");
    DISPTREERANGE(BlockRange(), lclStore);
    JITDUMP("\n");
}

//----------------------------------------------------------------------------------------------
// LowerRetStruct: Lowers a struct return node.
//
// Arguments:
//     ret - The return node to lower.
//
void Lowering::LowerRetStruct(GenTreeUnOp* ret)
{
#ifdef TARGET_ARM64
    if (GlobalJitOptions::compFeatureHfa)
    {
        if (varTypeIsSIMD(ret))
        {
            if (comp->info.compRetNativeType == TYP_STRUCT)
            {
                assert(varTypeIsSIMD(ret->gtGetOp1()));
                assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
                ret->ChangeType(comp->info.compRetNativeType);
            }
            else
            {
                assert(comp->info.compRetNativeType == ret->TypeGet());
                GenTree* retVal = ret->gtGetOp1();
                if (retVal->TypeGet() != ret->TypeGet())
                {
                    assert(retVal->OperIs(GT_LCL_VAR));
                    LowerRetSingleRegStructLclVar(ret);
                }
                return;
            }
        }
    }
#endif // TARGET_ARM64

    if (comp->compMethodReturnsMultiRegRegTypeAlternate())
    {
        return;
    }

    assert(ret->OperIs(GT_RETURN));
    assert(varTypeIsStruct(ret));

    GenTree* retVal = ret->gtGetOp1();
    // Note: small types are returned as INT.
    var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
    ret->ChangeType(nativeReturnType);

    switch (retVal->OperGet())
    {
        case GT_CALL:
            assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
            break;

        case GT_CNS_INT:
            // When we promote LCL_VAR single fields into return
            // we could have all types of constants here.
            if (varTypeUsesFloatReg(nativeReturnType))
            {
                // Do not expect `initblock` for SIMD* types,
                // only 'initobj'.
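                // (i.e. the only constant expected here for a float-typed return is an
                //  integral zero, which is re-typed to 0.0f below.)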
assert(retVal->AsIntCon()->IconValue() == 0); retVal->BashToConst(0.0, TYP_FLOAT); } break; case GT_OBJ: retVal->ChangeOper(GT_IND); FALLTHROUGH; case GT_IND: retVal->ChangeType(nativeReturnType); LowerIndir(retVal->AsIndir()); break; case GT_LCL_VAR: LowerRetSingleRegStructLclVar(ret); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #ifdef FEATURE_SIMD case GT_SIMD: #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: #endif // FEATURE_HW_INTRINSICS { assert(!retVal->TypeIs(TYP_STRUCT)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } break; #endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS case GT_LCL_FLD: { #ifdef DEBUG LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld()); assert(varDsc->lvDoNotEnregister); #endif retVal->ChangeType(nativeReturnType); } break; default: assert(varTypeIsEnregisterable(retVal)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } break; } } //---------------------------------------------------------------------------------------------- // LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source. // // Arguments: // node - The return node to lower. // // Notes: // - the function is only for LclVars that are returned in one register; // - if LclVar is allocated in memory then read it as return type; // - if LclVar can be enregistered read it as register type and add a bitcast if necessary; // void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret) { assert(!comp->compMethodReturnsMultiRegRegTypeAlternate()); assert(ret->OperIs(GT_RETURN)); GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar(); assert(lclVar->OperIs(GT_LCL_VAR)); unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); if (varDsc->lvPromoted) { // TODO-1stClassStructs: We can no longer independently promote // or enregister this struct, since it is referenced as a whole. comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } if (varDsc->lvDoNotEnregister) { lclVar->ChangeOper(GT_LCL_FLD); lclVar->AsLclFld()->SetLclOffs(0); // We are returning as a primitive type and the lcl is of struct type. assert(comp->info.compRetNativeType != TYP_STRUCT); assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) || (varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) && (genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret)))); // If the actual return type requires normalization, then make sure we // do so by using the correct small type for the GT_LCL_FLD. It would // be conservative to check just compRetNativeType for this since small // structs are normalized to primitive types when they are returned in // registers, so we would normalize for them as well. if (varTypeIsSmall(comp->info.compRetType)) { assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType)); lclVar->ChangeType(comp->info.compRetType); } else { // Otherwise we don't mind that we leave the upper bits undefined. 
lclVar->ChangeType(ret->TypeGet()); } } else { const var_types lclVarType = varDsc->GetRegisterType(lclVar); assert(lclVarType != TYP_UNDEF); const var_types actualType = genActualType(lclVarType); lclVar->ChangeType(actualType); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } } //---------------------------------------------------------------------------------------------- // LowerCallStruct: Lowers a call node that returns a struct. // // Arguments: // call - The call node to lower. // // Notes: // - this handles only single-register returns; // - it transforms the call's user for `GT_STOREIND`. // void Lowering::LowerCallStruct(GenTreeCall* call) { assert(varTypeIsStruct(call)); if (call->HasMultiRegRetVal()) { return; } if (GlobalJitOptions::compFeatureHfa) { if (comp->IsHfa(call)) { #if defined(TARGET_ARM64) assert(comp->GetHfaCount(call) == 1); #elif defined(TARGET_ARM) // ARM returns a double in 2 float registers, but // `call->HasMultiRegRetVal()` counts double registers. assert(comp->GetHfaCount(call) <= 2); #else // !TARGET_ARM64 && !TARGET_ARM NYI("Unknown architecture"); #endif // !TARGET_ARM64 && !TARGET_ARM var_types hfaType = comp->GetHfaType(call); if (call->TypeIs(hfaType)) { return; } } } CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN); var_types origType = call->TypeGet(); call->gtType = genActualType(returnType); LIR::Use callUse; if (BlockRange().TryGetUse(call, &callUse)) { GenTree* user = callUse.User(); switch (user->OperGet()) { case GT_RETURN: case GT_STORE_LCL_VAR: case GT_STORE_BLK: case GT_STORE_OBJ: // Leave as is, the user will handle it. assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet())); break; #ifdef FEATURE_SIMD case GT_STORE_LCL_FLD: // If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type. // If not, the user type should match the struct's returnType. assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet())); break; #endif // FEATURE_SIMD case GT_STOREIND: #ifdef FEATURE_SIMD if (varTypeIsSIMD(user)) { user->ChangeType(returnType); break; } #endif // FEATURE_SIMD // importer has a separate mechanism to retype calls to helpers, // keep it for now. assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI))); assert(call->IsHelperCall()); assert(returnType == user->TypeGet()); break; default: unreached(); } } } //---------------------------------------------------------------------------------------------- // LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call. // // Arguments: // store - The store node to lower.
// // Notes: // - the function is only for calls that return one register; // - it spills the call's result if it can be retyped as a primitive type; // void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store) { assert(store->Data()->IsCall()); GenTreeCall* call = store->Data()->AsCall(); assert(!call->HasMultiRegRetVal()); const ClassLayout* layout = store->GetLayout(); const var_types regType = layout->GetRegisterType(); if (regType != TYP_UNDEF) { store->ChangeType(regType); store->SetOper(GT_STOREIND); LowerStoreIndirCommon(store->AsStoreInd()); return; } else { #if defined(WINDOWS_AMD64_ABI) // All ABIs except Windows x64 support passing 3-byte structs in registers. // Other 64-bit ABIs also support passing 5-, 6- and 7-byte structs. unreached(); #else // !WINDOWS_AMD64_ABI if (store->OperIs(GT_STORE_OBJ)) { store->SetOper(GT_STORE_BLK); } store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll; GenTreeLclVar* spilledCall = SpillStructCallResult(call); store->SetData(spilledCall); LowerBlockStoreCommon(store); #endif // WINDOWS_AMD64_ABI } } #if !defined(WINDOWS_AMD64_ABI) //---------------------------------------------------------------------------------------------- // SpillStructCallResult: Spill call result to memory. // // Arguments: // call - call with a return size of 3, 5, 6 or 7 bytes that has to be spilled to memory. // // Return Value: // load of the spilled variable. // GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const { // TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps. const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size")); comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField)); CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; comp->lvaSetStruct(spillNum, retClsHnd, false); GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0); spill->gtOp1 = call; spill->gtFlags |= GTF_VAR_DEF; BlockRange().InsertAfter(call, spill); ContainCheckStoreLoc(spill); GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar(); BlockRange().InsertAfter(spill, loadCallResult); return loadCallResult; } #endif // !WINDOWS_AMD64_ABI GenTree* Lowering::LowerDirectCall(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER); // Non-virtual direct/indirect calls: Work out if the address of the // call is known at JIT time. If not, it is either an indirect call // or the address must be accessed via a single/double indirection.
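// For illustration only, the access shapes handled below are roughly:
//   IAT_VALUE:     call target        ; address known at JIT time
//   IAT_PVALUE:    call [cell]        ; one load through an indirection cell
//   IAT_PPVALUE:   call [[cell]]      ; two loads (asserted against below)
//   IAT_RELPVALUE: call [cell] + cell ; cell holds a self-relative offset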
void* addr; InfoAccessType accessType; CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd); #ifdef FEATURE_READYTORUN if (call->gtEntryPoint.addr != nullptr) { accessType = call->gtEntryPoint.accessType; addr = call->gtEntryPoint.addr; } else #endif if (call->gtCallType == CT_HELPER) { noway_assert(helperNum != CORINFO_HELP_UNDEF); // the convention on getHelperFtn seems to be (it's not documented) // that it returns an address or if it returns null, pAddr is set to // another address, which requires an indirection void* pAddr; addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr); if (addr != nullptr) { assert(pAddr == nullptr); accessType = IAT_VALUE; } else { accessType = IAT_PVALUE; addr = pAddr; } } else { noway_assert(helperNum == CORINFO_HELP_UNDEF); CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY; if (call->IsSameThis()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS); } if (!call->NeedsNullCheck()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL); } CORINFO_CONST_LOOKUP addrInfo; comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags); accessType = addrInfo.accessType; addr = addrInfo.addr; } GenTree* result = nullptr; switch (accessType) { case IAT_VALUE: // Non-virtual direct call to known address. // For JIT helper based tailcall (only used on x86) the target // address is passed as an arg to the helper so we want a node for // it. if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper()) { result = AddrGen(addr); } else { // a direct call within range of hardware relative call instruction // stash the address for codegen call->gtDirectCallAddress = addr; } break; case IAT_PVALUE: { // If we are using an indirection cell for a direct call then apply // an optimization that loads the call target directly from the // indirection cell, instead of duplicating the tree. bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None; if (!hasIndirectionCell) { // Non-virtual direct calls to addresses accessed by // a single indirection. GenTree* cellAddr = AddrGen(addr); #ifdef DEBUG cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif GenTree* indir = Ind(cellAddr); result = indir; } break; } case IAT_PPVALUE: // Non-virtual direct calls to addresses accessed by // a double indirection. // // Expanding an IAT_PPVALUE here, will lose the opportunity // to Hoist/CSE the first indirection as it is an invariant load // assert(!"IAT_PPVALUE case in LowerDirectCall"); noway_assert(helperNum == CORINFO_HELP_UNDEF); result = AddrGen(addr); // Double-indirection. Load the address into a register // and call indirectly through the register // result = Ind(Ind(result)); break; case IAT_RELPVALUE: { // Non-virtual direct calls to addresses accessed by // a single relative indirection. 
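// That is (sketch): target = [addr] + addr; the cell at 'addr' holds an offset
// relative to its own address, so we load it and add the cell address back.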
GenTree* cellAddr = AddrGen(addr); GenTree* indir = Ind(cellAddr); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr)); break; } default: noway_assert(!"Bad accessType"); break; } return result; } GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC); assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) & (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)); GenTree* thisArgNode; if (call->IsTailCallViaJitHelper()) { const unsigned argNum = 0; fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum); thisArgNode = thisArgTabEntry->GetNode(); } else { thisArgNode = comp->gtGetThisArg(call); } assert(thisArgNode != nullptr); assert(thisArgNode->gtOper == GT_PUTARG_REG); GenTree* thisExpr = thisArgNode->AsOp()->gtOp1; // We're going to use the 'this' expression multiple times, so make a local to copy it. GenTree* base; if (thisExpr->OperIs(GT_LCL_VAR)) { base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet()); } else if (thisExpr->OperIs(GT_LCL_FLD)) { base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(), thisExpr->AsLclFld()->GetLclOffs()); } else { unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call")); base = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet()); LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode); ReplaceWithLclVar(thisExprUse, delegateInvokeTmp); thisExpr = thisExprUse.Def(); // it's changed; reload it. } // replace original expression feeding into thisPtr with // [originalThis + offsetOfDelegateInstance] GenTree* newThisAddr = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance); GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr); BlockRange().InsertAfter(thisExpr, newThisAddr, newThis); thisArgNode->AsOp()->gtOp1 = newThis; ContainCheckIndir(newThis->AsIndir()); // the control target is // [originalThis + firstTgtOffs] unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget; GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs); GenTree* callTarget = Ind(result); // don't need to sequence and insert this tree, caller will do it return callTarget; } GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call) { #ifdef TARGET_X86 if (call->gtCallCookie != nullptr) { NYI_X86("Morphing indirect non-virtual call with non-standard args"); } #endif // Indirect cookie calls get transformed by fgMorphArgs into indirect calls with non-standard args. // Hence we should never see this type of call in lower. noway_assert(call->gtCallCookie == nullptr); return nullptr; } //------------------------------------------------------------------------ // CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke // epilogs to invoke a GC under a condition. The return trap checks some global // location (the runtime tells us where that is and how many indirections to make), // then, based on the result, conditionally calls a GC helper. We use a special node // for this because at this time (late in the compilation phases), introducing flow // is tedious/difficult. // // This is used for PInvoke inlining. // // Return Value: // Code tree to perform the action.
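//
// Notes:
//    The runtime hands back either the address of the flag itself or a pointer
//    to that address; the latter costs one extra indirection in the tree built
//    below.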
// GenTree* Lowering::CreateReturnTrapSeq() { // The GT_RETURNTRAP node expands to this: // if (g_TrapReturningThreads) // { // RareDisablePreemptiveGC(); // } // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'. void* pAddrOfCaptureThreadGlobal = nullptr; int32_t* addrOfCaptureThreadGlobal = comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal); GenTree* testTree; if (addrOfCaptureThreadGlobal != nullptr) { testTree = AddrGen(addrOfCaptureThreadGlobal); } else { testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal)); } return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT)); } //------------------------------------------------------------------------ // SetGCState: Create a tree that stores the given constant (0 or 1) into the // thread's GC state field. // // This is used for PInvoke inlining. // // Arguments: // state - constant (0 or 1) to store into the thread's GC state field. // // Return Value: // Code tree to perform the action. // GenTree* Lowering::SetGCState(int state) { // Thread.offsetOfGcState = 0/1 assert(state == 0 || state == 1); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state); GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState); GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode); return storeGcState; } //------------------------------------------------------------------------ // CreateFrameLinkUpdate: Create a tree that either links or unlinks the // locally-allocated InlinedCallFrame from the Frame list. // // This is used for PInvoke inlining. // // Arguments: // action - whether to link (push) or unlink (pop) the Frame // // Return Value: // Code tree to perform the action. // GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action) { const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); // Thread->m_pFrame GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame); GenTree* data = nullptr; if (action == PushFrame) { // Thread->m_pFrame = &inlinedCallFrame; data = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); } else { assert(action == PopFrame); // Thread->m_pFrame = inlinedCallFrame.m_pNext; data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameLink); } GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data); return storeInd; } //------------------------------------------------------------------------ // InsertPInvokeMethodProlog: Create the code that runs at the start of // every method that has PInvoke calls. // // Initialize the TCB local and the InlinedCallFrame object. Then link ("push") // the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame // is defined in vm/frames.h. See also vm/jitinterface.cpp for more information. // The offsets of these fields is returned by the VM in a call to ICorStaticInfo::getEEInfo(). 
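//
// In rough pseudo-code, the prolog emitted below is (illustrative sketch only):
//   TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&inlinedCallFrame.vptr [, secretArg]);
//   inlinedCallFrame.m_pCallSiteSP    = SP;  // not on x86/arm32
//   inlinedCallFrame.m_pCalleeSavedFP = FP;  // not on arm32
//   Thread->m_pFrame = &inlinedCallFrame;    // 64-bit targets, IL stubs only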
// // The (current) layout is as follows: // // 64-bit 32-bit CORINFO_EE_INFO // offset offset field name offset when set // ----------------------------------------------------------------------------------------- // +00h +00h GS cookie offsetOfGSCookie // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog // +10h +08h m_Next offsetOfFrameLink method prolog // +18h +0Ch m_Datum offsetOfCallTarget call site // +20h n/a m_StubSecretArg not set by JIT // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method // prolog; // non-x86: method prolog (SP remains // constant in function, after prolog: no // localloc and PInvoke in same function) // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT // +1Ch m_pThread // +20h m_pSPAfterProlog offsetOfSPAfterProlog arm only // +20/24h JIT retval spill area (int) before call_gc ??? // +24/28h JIT retval spill area (long) before call_gc ??? // +28/2Ch Saved value of EBP method prolog ??? // // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before* // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location, // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie. // // Return Value: // none // void Lowering::InsertPInvokeMethodProlog() { noway_assert(comp->info.compUnmanagedCallCountWithGCTransition); noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method prolog\n"); // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog. assert(comp->fgFirstBBisScratch()); LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list: // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg); // for x86, don't pass the secretArg. 
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr); #else GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM)); #endif GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList); // some sanity checks on the frame list root vardsc const unsigned lclNum = comp->info.compLvFrameListRoot; const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); noway_assert(!varDsc->lvIsParam); noway_assert(varDsc->lvType == TYP_I_IMPL); GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum); store->AsOp()->gtOp1 = call; store->gtFlags |= GTF_VAR_DEF; GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode(); comp->fgMorphTree(store); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store)); DISPTREERANGE(firstBlockRange, store); #if !defined(TARGET_X86) && !defined(TARGET_ARM) // For x86, this step is done at the call site (due to stack pointer not being static in the function). // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. // -------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = @RSP; GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeSP->gtOp1 = PhysReg(REG_SPBASE); storeSP->gtFlags |= GTF_VAR_DEF; assert(inlinedPInvokeDsc->lvDoNotEnregister); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP)); DISPTREERANGE(firstBlockRange, storeSP); #endif // !defined(TARGET_X86) && !defined(TARGET_ARM) #if !defined(TARGET_ARM) // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. // -------------------------------------------------------- // InlinedCallFrame.m_pCalleeSavedEBP = @RBP; GenTreeLclFld* storeFP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCalleeSavedFP); assert(inlinedPInvokeDsc->lvDoNotEnregister); storeFP->gtOp1 = PhysReg(REG_FPBASE); storeFP->gtFlags |= GTF_VAR_DEF; firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP)); DISPTREERANGE(firstBlockRange, storeFP); #endif // !defined(TARGET_ARM) // -------------------------------------------------------- // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Push a frame - if we are NOT in an IL stub, this is done right before the call // The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); DISPTREERANGE(firstBlockRange, frameUpd); } #endif // TARGET_64BIT } //------------------------------------------------------------------------ // InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method // that has PInvoke inlines. This needs to be inserted any place you can exit the // function: returns, tailcalls and jmps.
// // Arguments: // returnBB - basic block from which a method can return // lastExpr - GenTree of the last top level statement of returnBB (debug only arg) // // Return Value: // Code tree to perform the action. // void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)) { assert(returnBB != nullptr); assert(comp->info.compUnmanagedCallCountWithGCTransition); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method epilog\n"); // A method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); GenTree* insertionPoint = returnBlockRange.LastNode(); assert(insertionPoint == lastExpr); // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution // order so that it is guaranteed that there will be no further PInvokes after that point in the method. // // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be // Op1, PME, GT_RETURN // // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be // arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL // After inserting PME execution order would be: // arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL // // Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP // That is, after the PME, the args for the GT_JMP call will be set up. // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) #endif // TARGET_64BIT { GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame); returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } } //------------------------------------------------------------------------ // InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code. // It does all the necessary call-site setup of the InlinedCallFrame. // // Arguments: // call - the call for which we are inserting the PInvoke prolog. // // Return Value: // None. // void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call prolog\n"); GenTree* insertBefore = call; if (call->gtCallType == CT_INDIRECT) { bool isClosed; insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode(); assert(isClosed); } const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo; gtCallTypes callType = (gtCallTypes)call->gtCallType; noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { // First argument is the address of the frame variable. GenTree* frameAddr = new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar); #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) // On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum.
// This is because the callee pops stack arguments, and we need to keep track of this during stack // walking const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset(); GenTree* stackBytes = comp->gtNewIconNode(numStkArgBytes, TYP_INT); GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr, stackBytes); #else GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr); #endif // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args); comp->fgMorphTree(helperCall); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall)); LowerNode(helperCall); // helper call is inserted before current node and should be lowered here. return; } // Emit the following sequence: // // InlinedCallFrame.callTarget = methodHandle // stored in m_Datum // InlinedCallFrame.m_pCallSiteSP = SP // x86 only // InlinedCallFrame.m_pCallerReturnAddress = return address // GT_START_PREEMPTGC // Thread.gcState = 0 // (non-stub) - update top Frame on TCB // 64-bit targets only // ---------------------------------------------------------------------------------- // Set up InlinedCallFrame.callSiteTarget (which is how the JIT refers to it). // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings. GenTree* src = nullptr; if (callType == CT_INDIRECT) { #if !defined(TARGET_64BIT) // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum. const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset(); src = comp->gtNewIconNode(stackByteOffset, TYP_INT); #else // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum. // If the stub parameter value is not needed, m_Datum will be initialized by the VM. if (comp->info.compPublishStubParam) { src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL); } #endif // !defined(TARGET_64BIT) } else { assert(callType == CT_USER_FUNC); void* pEmbedMethodHandle = nullptr; CORINFO_METHOD_HANDLE embedMethodHandle = comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle); noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle)); if (embedMethodHandle != nullptr) { // InlinedCallFrame.callSiteTarget = methodHandle src = AddrGen(embedMethodHandle); } else { // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle src = Ind(AddrGen(pEmbedMethodHandle)); } } if (src != nullptr) { // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
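// In pseudo-code (sketch): inlinedCallFrame.m_Datum = src, where 'src' is the
// stack-arg size, the stub parameter, or the method handle, per the cases above.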
GenTreeLclFld* store = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallTarget); store->gtOp1 = src; store->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, store); } #ifdef TARGET_X86 // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = SP GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE); storeCallSiteSP->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP); #endif // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call) GenTreeLclFld* storeLab = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); storeLab->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeLab); // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method // contains PInvokes; on 64-bit targets this is necessary in non-stubs. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Set the TCB's frame to be the one we just created. // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME) // has prepended it to the linked list to maintain the stack of Frames. // // Stubs do this once per stub, not once per call. GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } #endif // TARGET_64BIT // IMPORTANT **** This instruction must be the last real instruction **** // It changes the thread's state to Preemptive mode // ---------------------------------------------------------------------------------- // [tcb + offsetOfGcState] = 0 GenTree* storeGCState = SetGCState(0); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState)); ContainCheckStoreIndir(storeGCState->AsStoreInd()); // Indicate that codegen has switched this thread to preemptive GC. // This tree node doesn't generate any code, but impacts LSRA and gc reporting. // This tree node is simple so doesn't require sequencing. GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID); BlockRange().InsertBefore(insertBefore, preemptiveGCNode); } //------------------------------------------------------------------------ // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call. // // Arguments: // call - the call for which we are inserting the PInvoke epilog. // // Return Value: // None. // void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call epilog\n"); if (comp->opts.ShouldUsePInvokeHelpers()) { noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); // First argument is the address of the frame variable. 
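// The whole epilog then reduces to a single helper call (sketch):
// CORINFO_HELP_JIT_PINVOKE_END(&inlinedCallFrame).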
GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF); #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG // Insert call to CORINFO_HELP_JIT_PINVOKE_END GenTreeCall* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr)); comp->fgMorphTree(helperCall); BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall)); ContainCheckCallOperands(helperCall); return; } // gcstate = 1 GenTree* insertionPoint = call->gtNext; GenTree* tree = SetGCState(1); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckStoreIndir(tree->AsStoreInd()); tree = CreateReturnTrapSeq(); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckReturnTrap(tree->AsOp()); // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { tree = CreateFrameLinkUpdate(PopFrame); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckStoreIndir(tree->AsStoreInd()); } #else const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo; // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = nullptr GenTreeLclFld* const storeCallSiteTracker = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); storeCallSiteTracker->gtOp1 = constantZero; storeCallSiteTracker->gtFlags |= GTF_VAR_DEF; BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker); ContainCheckStoreLoc(storeCallSiteTracker); #endif // TARGET_64BIT } //------------------------------------------------------------------------ // LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call // // Arguments: // call - The call to lower. // // Return Value: // The lowered call tree. // GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call) { // PInvoke lowering varies depending on the flags passed in by the EE. By default, // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified, // GC transitions are instead performed using helper calls. Examples of each case are given // below. Note that the data structure that is used to store information about a call frame // containing any P/Invoke calls is initialized in the method prolog (see // InsertPInvokeMethod{Prolog,Epilog} for details). // // Inline transitions: // InlinedCallFrame inlinedCallFrame; // // ...
// // // Set up frame information // inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum // inlinedCallFrame.m_pCallSiteSP = SP; // x86 only // inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the // call) // Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only) // // // Switch the thread's GC mode to preemptive mode // thread->m_fPreemptiveGCDisabled = 0; // // // Call the unmanaged method // target(); // // // Switch the thread's GC mode back to cooperative mode // thread->m_fPreemptiveGCDisabled = 1; // // // Rendezvous with a running collection if necessary // if (g_TrapReturningThreads) // RareDisablePreemptiveGC(); // // Transitions using helpers: // // OpaqueFrame opaqueFrame; // // ... // // // Call the JIT_PINVOKE_BEGIN helper // JIT_PINVOKE_BEGIN(&opaqueFrame); // // // Call the unmanaged method // target(); // // // Call the JIT_PINVOKE_END helper // JIT_PINVOKE_END(&opaqueFrame); // // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target // platform. They may be changed in the future such that they preserve all register values. GenTree* result = nullptr; // All code generated by this function must not contain the randomly-inserted NOPs // that we insert to inhibit JIT spraying in partial trust scenarios. // The PINVOKE_PROLOG op signals this to the code generator/emitter. GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID); BlockRange().InsertBefore(call, prolog); bool addPInvokePrologEpilog = !call->IsSuppressGCTransition(); if (addPInvokePrologEpilog) { InsertPInvokeCallProlog(call); } if (call->gtCallType != CT_INDIRECT) { noway_assert(call->gtCallType == CT_USER_FUNC); CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd; CORINFO_CONST_LOOKUP lookup; comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup); void* addr = lookup.addr; GenTree* addrTree; switch (lookup.accessType) { case IAT_VALUE: // IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing // for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs // (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to // turn fAllowRel32 off globally. if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) || !IsCallTargetInRange(addr)) { result = AddrGen(addr); } else { // a direct call within range of hardware relative call instruction // stash the address for codegen call->gtDirectCallAddress = addr; #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif } break; case IAT_PVALUE: addrTree = AddrGen(addr); #ifdef DEBUG addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd; #endif result = Ind(addrTree); break; case IAT_PPVALUE: // TODO: Expanding an IAT_PPVALUE here loses the opportunity // to Hoist/CSE the first indirection as it is an invariant load // // This case currently occurs today when we make PInvoke calls in crossgen // // assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall"); addrTree = AddrGen(addr); #ifdef DEBUG addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd; #endif // Double-indirection.
Load the address into a register // and call indirectly through the register // result = Ind(Ind(addrTree)); break; case IAT_RELPVALUE: unreached(); } } if (addPInvokePrologEpilog) { InsertPInvokeCallEpilog(call); } return result; } // Expand the code necessary to calculate the control target. // Returns: the expression needed to calculate the control target // May insert embedded statements GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC); regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call); // get a reference to the thisPtr being passed fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0); assert(argEntry->GetRegNum() == thisPtrArgReg); assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG)); GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1(); // If what we are passing as the thisptr is not already a local, make a new local to place it in // because we will be creating expressions based on it. unsigned lclNum; if (thisPtr->OperIsLocal()) { lclNum = thisPtr->AsLclVarCommon()->GetLclNum(); } else { // Split off the thisPtr and store to a temporary variable. if (vtableCallTemp == BAD_VAR_NUM) { vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call")); } LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode()); ReplaceWithLclVar(thisPtrUse, vtableCallTemp); lclNum = vtableCallTemp; } // Get hold of the vtable offset (note: this might be expensive) unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // If the thisPtr is a local field, then construct a local field type node GenTree* local; if (thisPtr->isLclField()) { local = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs()); } else { local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum); } // pointer to virtual table = [REG_CALL_THIS + offs] GenTree* result = Ind(Offset(local, VPTR_OFFS)); // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. 
// Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // If relative pointers are also in second level indirection, additional temporary is used: // tmp1 = vtab // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection] // result = tmp2 + [tmp2] // unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp")); unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2")); GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result); GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet()); tmpTree = Offset(tmpTree, vtabOffsOfIndirection); tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false); GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs); GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1); GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base); LIR::Range range = LIR::SeqTree(comp, lclvNodeStore); JITDUMP("result of obtaining pointer to virtual table:\n"); DISPRANGE(range); BlockRange().InsertBefore(call, std::move(range)); LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2); ContainCheckIndir(tmpTree->AsIndir()); JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n"); DISPRANGE(range2); BlockRange().InsertAfter(lclvNodeStore, std::move(range2)); result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); } else { // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection] result = Ind(Offset(result, vtabOffsOfIndirection)); } } else { assert(!isRelative); } // Load the function address // result = [reg+vtabOffs] if (!isRelative) { result = Ind(Offset(result, vtabOffsAfterIndirection)); } return result; } // Lower stub dispatched virtual calls. GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) { assert(call->IsVirtualStub()); // An x86 JIT which uses full stub dispatch must generate only // the following stub dispatch calls: // // (1) isCallRelativeIndirect: // call dword ptr [rel32] ; FF 15 ---rel32---- // (2) isCallRelative: // call abc ; E8 ---rel32---- // (3) isCallRegisterIndirect: // 3-byte nop ; // call dword ptr [eax] ; FF 10 // // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect. GenTree* result = nullptr; // This is code to set up an indirect call to a stub address computed // via dictionary lookup. if (call->gtCallType == CT_INDIRECT) { // The importer decided we needed a stub call via a computed // stub dispatch address, i.e. an address which came from a dictionary lookup. // - The dictionary lookup produces an indirected address, suitable for call // via "call [VirtualStubParam.reg]" // // This combination will only be generated for shared generic code and when // stub dispatch is active. // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg. // All we have to do here is add an indirection to generate the actual call target. 
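// As a sketch: gtCallAddr becomes IND(gtCallAddr), marked with
// GTF_IND_REQ_ADDR_IN_REG so the stub address stays in a register for the
// "call [reg]" form.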
GenTree* ind = Ind(call->gtCallAddr); BlockRange().InsertAfter(call->gtCallAddr, ind); call->gtCallAddr = ind; ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG; ContainCheckIndir(ind->AsIndir()); } else { // Direct stub call. // Get stub addr. This will return NULL if virtual call stubs are not active void* stubAddr = call->gtStubCallStubAddr; noway_assert(stubAddr != nullptr); // If not CT_INDIRECT, then it should always be a relative indir call. // This is ensured by the VM. noway_assert(call->IsVirtualStubRelativeIndir()); // Direct stub calls, though the stubAddr itself may still need to be // accessed via an indirection. GenTree* addr = AddrGen(stubAddr); // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as // the target address, and we set a flag that it's a VSD call. The helper then // handles any necessary indirection. if (call->IsTailCallViaJitHelper()) { result = addr; } else { bool shouldOptimizeVirtualStubCall = false; #if defined(TARGET_ARMARCH) || defined(TARGET_AMD64) // Skip inserting the indirection node to load the address that is already // computed in the VSD stub arg register as a hidden parameter. Instead during the // codegen, just load the call target from there. shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled(); #endif if (!shouldOptimizeVirtualStubCall) { result = Ind(addr); } } } // TODO-Cleanup: start emitting random NOPS return result; } //------------------------------------------------------------------------ // Lowering::AreSourcesPossiblyModifiedLocals: // Given two nodes which will be used in an addressing mode (base, // index), check to see if they are lclVar reads, and if so, walk // backwards from the use until both reads have been visited to // determine if they are potentially modified in that range. // // Arguments: // addr - the node that uses the base and index nodes // base - the base node // index - the index node // // Returns: true if either the base or index may be modified between the // node and addr. // bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index) { assert(addr != nullptr); SideEffectSet baseSideEffects; if (base != nullptr) { if (base->OperIsLocalRead()) { baseSideEffects.AddNode(comp, base); } else { base = nullptr; } } SideEffectSet indexSideEffects; if (index != nullptr) { if (index->OperIsLocalRead()) { indexSideEffects.AddNode(comp, index); } else { index = nullptr; } } for (GenTree* cursor = addr;; cursor = cursor->gtPrev) { assert(cursor != nullptr); if (cursor == base) { base = nullptr; } if (cursor == index) { index = nullptr; } if ((base == nullptr) && (index == nullptr)) { return false; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, cursor); if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false)) { return true; } if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false)) { return true; } } } //------------------------------------------------------------------------ // TryCreateAddrMode: recognize trees which can be implemented using an // addressing mode and transform them to a GT_LEA // // Arguments: // addr - the use of the address we want to transform // isContainable - true if this addressing mode can be contained // parent - the node that consumes the given addr (most likely it's an IND) // // Returns: // true if the address node was changed to a LEA, false otherwise.
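//
// Notes:
//    A hypothetical example (illustrative only): ADD(lcl, ADD(LSH(idx, 3), 16))
//    can become LEA(lcl, idx, scale = 8, offset = 16), provided neither operand
//    may be modified between its definition and this use.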
// bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent) { if (!addr->OperIs(GT_ADD) || addr->gtOverflow()) { return false; } #ifdef TARGET_ARM64 if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr)) { // For Arm64 we avoid using LEA for volatile INDs // because we won't be able to use ldar/star return false; } #endif GenTree* base = nullptr; GenTree* index = nullptr; unsigned scale = 0; ssize_t offset = 0; bool rev = false; // Find out if an addressing mode can be constructed bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address true, // fold &rev, // reverse ops &base, // base addr &index, // index val &scale, // scaling &offset); // displacement var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF; #ifdef TARGET_ARMARCH // Multiplier should be a "natural-scale" power of two number which is equal to target's width. // // *(ulong*)(data + index * 8); - can be optimized // *(ulong*)(data + index * 7); - can not be optimized // *(int*)(data + index * 2); - can not be optimized // if ((scale > 0) && (genTypeSize(targetType) != scale)) { return false; } #endif if (scale == 0) { scale = 1; } if (!isContainable) { // this is just a reg-const add if (index == nullptr) { return false; } // this is just a reg-reg add if ((scale == 1) && (offset == 0)) { return false; } } // make sure there are not any side effects between def of leaves and use if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index)) { JITDUMP("No addressing mode:\n "); DISPNODE(addr); return false; } JITDUMP("Addressing mode:\n"); JITDUMP(" Base\n "); DISPNODE(base); if (index != nullptr) { JITDUMP(" + Index * %u + %d\n ", scale, offset); DISPNODE(index); } else { JITDUMP(" + %d\n", offset); } // Save the (potentially) unused operands before changing the address to LEA. ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack)); unusedStack.Push(addr->AsOp()->gtGetOp1()); unusedStack.Push(addr->AsOp()->gtGetOp2()); addr->ChangeOper(GT_LEA); // Make sure there are no leftover side effects (though the existing ADD we're // changing shouldn't have any at this point, but sometimes it does). addr->gtFlags &= ~GTF_ALL_EFFECT; GenTreeAddrMode* addrMode = addr->AsAddrMode(); addrMode->SetBase(base); addrMode->SetIndex(index); addrMode->SetScale(scale); addrMode->SetOffset(static_cast<int>(offset)); // Neither the base nor the index should now be contained. if (base != nullptr) { base->ClearContained(); } if (index != nullptr) { index->ClearContained(); } // Remove all the nodes that are no longer used. while (!unusedStack.Empty()) { GenTree* unused = unusedStack.Pop(); // Use a loop to process some of the nodes iteratively // instead of pushing them on the stack. while ((unused != base) && (unused != index)) { JITDUMP("Removing unused node:\n "); DISPNODE(unused); BlockRange().Remove(unused); if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH)) { // Push the first operand and loop back to process the second one. // This minimizes the stack depth because the second one tends to be // a constant so it gets processed and then the first one gets popped. unusedStack.Push(unused->AsOp()->gtGetOp1()); unused = unused->AsOp()->gtGetOp2(); } else { assert(unused->OperIs(GT_CNS_INT)); break; } } } #ifdef TARGET_ARM64 // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store. 
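// For instance (illustrative): an int32 index widened and scaled to match the
// access size, e.g. data[(long)i * 8] for an 8-byte load, surfaces here as
// BFIZ(CAST(int->long), 3) and can be folded into the load/store itself.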
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) && index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType))) { // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT GenTreeCast* cast = index->gtGetOp1()->AsCast(); assert(cast->isContained()); const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue(); // 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form // where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width. if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) && (scale == 1) && (offset == 0)) { // TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ. MakeSrcContained(addrMode, index); } } #endif JITDUMP("New addressing mode node:\n "); DISPNODE(addrMode); JITDUMP("\n"); return true; } //------------------------------------------------------------------------ // LowerAdd: turn this add into a GT_LEA if that would be profitable // // Arguments: // node - the node we care about // // Returns: // nullptr if no transformation was done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerAdd(GenTreeOp* node) { if (varTypeIsIntegralOrI(node->TypeGet())) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); LIR::Use use; // It is not the best place to do such simple arithmetic optimizations, // but it allows us to avoid `LEA(addr, 0)` nodes and doing that in morph // requires more changes. Delete that part if we get an expression optimizer. if (op2->IsIntegralConst(0)) { JITDUMP("Lower: optimize val + 0: "); DISPNODE(node); JITDUMP("Replaced with: "); DISPNODE(op1); if (BlockRange().TryGetUse(node, &use)) { use.ReplaceWith(op1); } else { op1->SetUnusedValue(); } GenTree* next = node->gtNext; BlockRange().Remove(op2); BlockRange().Remove(node); JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID); return next; } #ifndef TARGET_ARMARCH if (BlockRange().TryGetUse(node, &use)) { // If this is a child of an indir, let the parent handle it. // If there is a chain of adds, only look at the topmost one. GenTree* parent = use.User(); if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD)) { TryCreateAddrMode(node, false, parent); } } #endif // !TARGET_ARMARCH } if (node->OperIs(GT_ADD)) { ContainCheckBinary(node); } return nullptr; } //------------------------------------------------------------------------ // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node. // // Arguments: // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered // // Return Value: // Returns a boolean indicating whether the node was transformed. 
// // Notes: // - Transform UDIV/UMOD by power of 2 into RSZ/AND // - Transform UDIV by constant >= 2^(N-1) into GE // - Transform UDIV/UMOD by constant >= 3 into "magic division" // bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) { assert(divMod->OperIs(GT_UDIV, GT_UMOD)); #if defined(USE_HELPERS_FOR_INT_DIV) if (!varTypeIsIntegral(divMod->TypeGet())) { assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls"); } assert(varTypeIsFloating(divMod->TypeGet())); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(divMod->OperGet() != GT_UMOD); #endif // TARGET_ARM64 GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); #if !defined(TARGET_64BIT) if (dividend->OperIs(GT_LONG)) { return false; } #endif if (!divisor->IsCnsIntOrI()) { return false; } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return false; } const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_I_IMPL)); size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue()); if (type == TYP_INT) { // Clear up the upper 32 bits of the value, they may be set to 1 because constants // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets. divisorValue &= UINT32_MAX; } if (divisorValue == 0) { return false; } const bool isDiv = divMod->OperIs(GT_UDIV); if (isPow2(divisorValue)) { genTreeOps newOper; if (isDiv) { newOper = GT_RSZ; divisorValue = genLog2(divisorValue); } else { newOper = GT_AND; divisorValue -= 1; } divMod->SetOper(newOper); divisor->AsIntCon()->SetIconValue(divisorValue); ContainCheckNode(divMod); return true; } if (isDiv) { // If the divisor is greater or equal than 2^(N - 1) then the result is 1 // iff the dividend is greater or equal than the divisor. if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) || ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2)))) { divMod->SetOper(GT_GE); divMod->gtFlags |= GTF_UNSIGNED; ContainCheckNode(divMod); return true; } } // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) if (!comp->opts.MinOpts() && (divisorValue >= 3)) { size_t magic; bool increment; int preShift; int postShift; bool simpleMul = false; unsigned bits = type == TYP_INT ? 
32 : 64;
        // if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced
        if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI())
        {
            size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
            if (maskCns != 0)
            {
                unsigned maskBits = 1;
                while (maskCns >>= 1)
                    maskBits++;
                if (maskBits < bits)
                    bits = maskBits;
            }
        }
        else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI())
        {
            size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
            if (shiftCns < bits)
            {
                bits -= static_cast<unsigned>(shiftCns);
            }
        }

        if (type == TYP_INT)
        {
            magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift,
                                                    &postShift, bits);

#ifdef TARGET_64BIT
            // avoid inc_saturate/multiple shifts by widening to 32x64 MULHI
            if (increment || (preShift
#ifdef TARGET_XARCH
                              // IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension
                              && static_cast<int32_t>(magic) < 0
#endif
                              ))
            {
                magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
                                                        &postShift, bits);
            }
            // otherwise just widen to regular multiplication
            else
            {
                postShift += 32;
                simpleMul = true;
            }
#endif
        }
        else
        {
#ifdef TARGET_64BIT
            magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
                                                    &postShift, bits);
#else
            unreached();
#endif
        }
        assert(divMod->MarkedDivideByConstOptimized());

        const bool     requiresDividendMultiuse = !isDiv;
        const weight_t curBBWeight              = m_block->getBBWeight(comp);

        if (requiresDividendMultiuse)
        {
            LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
            dividend = ReplaceWithLclVar(dividendUse);
        }

        GenTree* firstNode        = nullptr;
        GenTree* adjustedDividend = dividend;

#ifdef TARGET_ARM64
        // On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one.
        bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul;
#else
        CLANG_FORMAT_COMMENT_ANCHOR;
        bool widenToNativeIntForMul = (type != TYP_I_IMPL);
#endif

        // If the "increment" flag is returned by GetUnsignedMagic we need to do Saturating Increment first
        if (increment)
        {
            adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend);
            BlockRange().InsertBefore(divMod, adjustedDividend);
            firstNode = adjustedDividend;
            assert(!preShift);
        }
        // if "preShift" is required, then do a right shift before
        else if (preShift)
        {
            GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT);
            adjustedDividend    = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy);
            BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend);
            firstNode = preShiftBy;
        }
        else if (widenToNativeIntForMul)
        {
            adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL);
            BlockRange().InsertBefore(divMod, adjustedDividend);
            firstNode = adjustedDividend;
        }

#ifdef TARGET_XARCH
        // force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes
        // redundant copies otherwise
        if (firstNode && !simpleMul)
        {
            adjustedDividend->SetRegNum(REG_RAX);
        }
#endif

        if (widenToNativeIntForMul)
        {
            divisor->gtType = TYP_I_IMPL;
        }
        divisor->AsIntCon()->SetIconValue(magic);

        if (isDiv && !postShift && (type == TYP_I_IMPL))
        {
            divMod->SetOper(GT_MULHI);
            divMod->gtOp1 = adjustedDividend;
            divMod->SetUnsigned();
        }
        else
        {
#ifdef TARGET_ARM64
            // 64-bit MUL is more expensive than UMULL on ARM64.
            genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI;
#else
            // 64-bit IMUL is less expensive than MUL eax:edx on x64.
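            // Illustrative sketch (not emitted by this code; the constants come from the
            // standard unsigned magic-number algorithm): a 32-bit divide by 10 uses
            // magic = 0xCCCCCCCD with postShift = 3, i.e.
            //
            //     uint32_t div10(uint32_t n)
            //     {
            //         return (uint32_t)(((uint64_t)n * 0xCCCCCCCDu) >> 35); // same as n / 10
            //     }
            //
            // The MULHI selected below produces the high half of that widening multiply;
            // the post-shift is applied to it afterwards.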
            genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI;
#endif
            // Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node.
            // The existing node will later be transformed into a GT_RSZ/GT_SUB that
            // computes the final result. This way we don't need to find and change the use
            // of the existing node.
            GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor);
            mulhi->SetUnsigned();
            BlockRange().InsertBefore(divMod, mulhi);
            if (firstNode == nullptr)
            {
                firstNode = mulhi;
            }

            if (postShift)
            {
                GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT);
                BlockRange().InsertBefore(divMod, shiftBy);

                if (isDiv && (type == TYP_I_IMPL))
                {
                    divMod->SetOper(GT_RSZ);
                    divMod->gtOp1 = mulhi;
                    divMod->gtOp2 = shiftBy;
                }
                else
                {
                    mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy);
                    BlockRange().InsertBefore(divMod, mulhi);
                }
            }

            if (!isDiv)
            {
                // dividend UMOD divisor = dividend SUB (div MUL divisor)
                GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
                GenTree* mul     = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor);
                dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());

                divMod->SetOper(GT_SUB);
                divMod->gtOp1 = dividend;
                divMod->gtOp2 = mul;

                BlockRange().InsertBefore(divMod, divisor, mul, dividend);
            }
            else if (type != TYP_I_IMPL)
            {
#ifdef TARGET_ARMARCH
                divMod->SetOper(GT_CAST);
                divMod->SetUnsigned();
                divMod->AsCast()->gtCastType = TYP_INT;
#else
                divMod->SetOper(GT_BITCAST);
#endif
                divMod->gtOp1 = mulhi;
                divMod->gtOp2 = nullptr;
            }
        }

        if (firstNode != nullptr)
        {
            ContainCheckRange(firstNode, divMod);
        }

        return true;
    }
#endif
    return false;
}

//------------------------------------------------------------------------
// LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
//    node - pointer to the DIV or MOD node
//
// Returns:
//    nullptr if no transformation is done, or the next node in the transformed node sequence that
//    needs to be lowered.
//
GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
{
    assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
    GenTree* divMod   = node;
    GenTree* dividend = divMod->gtGetOp1();
    GenTree* divisor  = divMod->gtGetOp2();

    const var_types type = divMod->TypeGet();
    assert((type == TYP_INT) || (type == TYP_LONG));

#if defined(USE_HELPERS_FOR_INT_DIV)
    assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
#endif // USE_HELPERS_FOR_INT_DIV

#if defined(TARGET_ARM64)
    assert(node->OperGet() != GT_MOD);
#endif // TARGET_ARM64

    if (!divisor->IsCnsIntOrI())
    {
        return nullptr; // no transformations to make
    }

    if (dividend->IsCnsIntOrI())
    {
        // We shouldn't see a divmod with constant operands here but if we do then it's likely
        // because optimizations are disabled or it's a case that's supposed to throw an exception.
        // Don't optimize this.
        return nullptr;
    }

    ssize_t divisorValue = divisor->AsIntCon()->IconValue();

    if (divisorValue == -1 || divisorValue == 0)
    {
        // x / 0 and x % 0 can't be optimized because they are required to throw an exception.

        // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.

        // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
        // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
        // case so optimizing this case would break C# code.

        // A runtime check could be used to handle this case but it's probably too rare to matter.
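        // Concretely (for illustration): INT32_MIN / -1 would be +2147483648, which is
        // not representable in a 32-bit signed integer, so the divide instruction faults
        // and the runtime surfaces it as a managed System.OverflowException.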
        return nullptr;
    }

    bool isDiv = divMod->OperGet() == GT_DIV;

    if (isDiv)
    {
        if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
        {
            // If the divisor is the minimum representable integer value then we can use a compare;
            // the result is 1 iff the dividend equals the divisor.
            divMod->SetOper(GT_EQ);
            return node;
        }
    }

    size_t absDivisorValue =
        (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));

    if (!isPow2(absDivisorValue))
    {
        if (comp->opts.MinOpts())
        {
            return nullptr;
        }

#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        ssize_t magic;
        int     shift;

        if (type == TYP_INT)
        {
            magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
        }
        else
        {
#ifdef TARGET_64BIT
            magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
#else  // !TARGET_64BIT
            unreached();
#endif // !TARGET_64BIT
        }

        divisor->AsIntConCommon()->SetIconValue(magic);

        // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
        // The existing node will later be transformed into a GT_ADD/GT_SUB that
        // computes the final result. This way we don't need to find and change the
        // use of the existing node.
        GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
        BlockRange().InsertBefore(divMod, mulhi);

        // mulhi was the easy part. Now we need to generate different code depending
        // on the divisor value:
        // For 3 we need:
        //     div = signbit(mulhi) + mulhi
        // For 5 we need:
        //     div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
        // For 7 we need:
        //     mulhi += dividend                    ; requires add adjust
        //     div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
        // For -3 we need:
        //     mulhi -= dividend                    ; requires sub adjust
        //     div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
        bool requiresAddSubAdjust     = signum(divisorValue) != signum(magic);
        bool requiresShiftAdjust      = shift != 0;
        bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;

        if (requiresDividendMultiuse)
        {
            LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi);
            dividend = ReplaceWithLclVar(dividendUse);
        }

        GenTree* adjusted;

        if (requiresAddSubAdjust)
        {
            dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
            adjusted = comp->gtNewOperNode(divisorValue > 0 ?
GT_ADD : GT_SUB, type, mulhi, dividend);
            BlockRange().InsertBefore(divMod, dividend, adjusted);
        }
        else
        {
            adjusted = mulhi;
        }

        GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
        GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
        BlockRange().InsertBefore(divMod, shiftBy, signBit);

        LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit);
        adjusted = ReplaceWithLclVar(adjustedUse);
        adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
        BlockRange().InsertBefore(divMod, adjusted);

        if (requiresShiftAdjust)
        {
            shiftBy  = comp->gtNewIconNode(shift, TYP_INT);
            adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
            BlockRange().InsertBefore(divMod, shiftBy, adjusted);
        }

        if (isDiv)
        {
            divMod->SetOperRaw(GT_ADD);
            divMod->AsOp()->gtOp1 = adjusted;
            divMod->AsOp()->gtOp2 = signBit;
        }
        else
        {
            GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);

            dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());

            // dividend % divisor = dividend - divisor x div
            GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
            GenTree* mul     = comp->gtNewOperNode(GT_MUL, type, div, divisor);
            BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);

            divMod->SetOperRaw(GT_SUB);
            divMod->AsOp()->gtOp1 = dividend;
            divMod->AsOp()->gtOp2 = mul;
        }

        return mulhi;
#elif defined(TARGET_ARM)
        // Currently there's no GT_MULHI for ARM32
        return nullptr;
#else
#error Unsupported or unset target architecture
#endif
    }

    // We're committed to the conversion now. Go find the use if any.
    LIR::Use use;
    if (!BlockRange().TryGetUse(node, &use))
    {
        return nullptr;
    }

    // We need to use the dividend node multiple times so its value needs to be
    // computed once and stored in a temp variable.
    LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod);
    dividend = ReplaceWithLclVar(opDividend);

    GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));

    if (absDivisorValue == 2)
    {
        // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
        // We can get the same result by using GT_RSZ instead of GT_RSH.
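        // Worked example (illustration only) of the general adjust-and-shift sequence
        // being built here, for x / 4:
        //
        //     adjustment = (x >> 31) & 3;        // 0 if x >= 0, 3 if x < 0
        //     result     = (x + adjustment) >> 2;
        //
        // e.g. x = -5: (-5 + 3) >> 2 == -1, matching truncated division, whereas a plain
        // arithmetic shift would give floor(-5 / 4) == -2. For +/-2 the mask is 1, which
        // is exactly what the logical shift below produces.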
        adjustment->SetOper(GT_RSZ);
    }
    else
    {
        adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
    }

    GenTree* adjustedDividend =
        comp->gtNewOperNode(GT_ADD, type, adjustment,
                            comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));

    GenTree* newDivMod;

    if (isDiv)
    {
        // perform the division by right shifting the adjusted dividend
        divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue));

        newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
        ContainCheckShiftRotate(newDivMod->AsOp());

        if (divisorValue < 0)
        {
            // negate the result if the divisor is negative
            newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
            ContainCheckNode(newDivMod);
        }
    }
    else
    {
        // dividend % divisor = dividend - divisor x (dividend / divisor)
        // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor)
        // which simply discards the low log2(divisor) bits; that's just dividend & ~(divisor - 1)
        divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1));

        newDivMod = comp->gtNewOperNode(GT_SUB, type,
                                        comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
                                        comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
    }

    // Remove the divisor and dividend nodes from the linear order,
    // since we have reused them and will resequence the tree
    BlockRange().Remove(divisor);
    BlockRange().Remove(dividend);

    // linearize and insert the new tree before the original divMod node
    InsertTreeBeforeAndContainCheck(divMod, newDivMod);
    BlockRange().Remove(divMod);

    // replace the original divmod node with the new divmod tree
    use.ReplaceWith(newDivMod);

    return newDivMod->gtNext;
}

//------------------------------------------------------------------------
// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
//    node - the DIV or MOD node
//
// Returns:
//    The next node to lower.
//
GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
{
    assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
    GenTree* next = node->gtNext;

    if (varTypeIsIntegral(node->TypeGet()))
    {
        // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
        GenTree* newNode = LowerConstIntDivOrMod(node);
        if (newNode != nullptr)
        {
            return newNode;
        }
    }
    ContainCheckDivOrMod(node->AsOp());

    return next;
}

//------------------------------------------------------------------------
// LowerShift: Lower shift nodes
//
// Arguments:
//    shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
//
// Notes:
//    Remove unnecessary shift count masking: xarch shift instructions
//    mask the shift count to 5 bits (or 6 bits for 64 bit operations).
void Lowering::LowerShift(GenTreeOp* shift)
{
    assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));

    size_t mask = 0x1f;
#ifdef TARGET_64BIT
    if (varTypeIsLong(shift->TypeGet()))
    {
        mask = 0x3f;
    }
#else
    assert(!varTypeIsLong(shift->TypeGet()));
#endif

    for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
    {
        GenTree* maskOp = andOp->gtGetOp2();

        if (!maskOp->IsCnsIntOrI())
        {
            break;
        }

        if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
        {
            break;
        }

        shift->gtOp2 = andOp->gtGetOp1();
        BlockRange().Remove(andOp);
        BlockRange().Remove(maskOp);
        // The parent was replaced; clear the contain and regOpt flags.
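        // e.g. for IL equivalent to "x << (count & 31)" with a 32-bit shift, the AND
        // is redundant: the shift instruction already masks the count to 5 bits
        // (6 bits for 64-bit operations), so the shift can consume the AND's source
        // operand directly and the AND/mask nodes are deleted.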
        shift->gtOp2->ClearContained();
    }

    ContainCheckShiftRotate(shift);

#ifdef TARGET_ARM64
    // Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree
    if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) &&
        shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained())
    {
        GenTreeIntCon* cns  = shift->gtGetOp2()->AsIntCon();
        GenTreeCast*   cast = shift->gtGetOp1()->AsCast();

        if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() &&
            // Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load
            cast->CastOp()->TypeIs(TYP_LONG, TYP_INT))
        {
            // Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned)
            unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE;
            unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
                                                                  : genTypeSize(cast->CastOp()) * BITS_PER_BYTE;
            assert(!cast->CastOp()->isContained());

            // It has to be an upcast and CNS must be in [1..srcBits) range
            if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits))
            {
                JITDUMP("Recognized ubfiz/sbfiz pattern in LSH(CAST, CNS). Changing op to GT_BFIZ");
                shift->ChangeOper(GT_BFIZ);
                MakeSrcContained(shift, cast);
            }
        }
    }
#endif
}

void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
{
#ifdef FEATURE_SIMD
    if (node->TypeGet() == TYP_SIMD12)
    {
        // Assumption 1:
        // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
        // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
        // reading and writing purposes.
        //
        // Assumption 2:
        // RyuJit backend makes another implicit assumption: when Vector3 type args are passed in
        // registers or on the stack, the uppermost 4 bytes will be zero.
        //
        // For P/Invoke return and Reverse P/Invoke argument passing, the native compiler doesn't guarantee
        // that the upper 4 bytes of a Vector3 type struct are zero initialized and hence assumption 2 is
        // invalid.
        //
        // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
        // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
        // passes it as the retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
        // there is no need to clear the upper 4 bytes of Vector3 type args.
        //
        // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
        // Vector3 return values are returned in two return registers and Caller assembles them into a
        // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
        // type args in the prolog and of the Vector3 type return value of a call
        //
        // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
        // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
        // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear
        // it either.

        LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon());

        if (comp->lvaMapSimd12ToSimd16(varDsc))
        {
            JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
            DISPNODE(node);
            JITDUMP("============");

            node->gtType = TYP_SIMD16;
        }
    }
#endif // FEATURE_SIMD
}

//------------------------------------------------------------------------
// LowerArrElem: Lower a GT_ARR_ELEM node
//
// Arguments:
//    node - the GT_ARR_ELEM node to lower.
//
// Return Value:
//    The next node to lower.
// // Assumptions: // pTree points to a pointer to a GT_ARR_ELEM node. // // Notes: // This performs the following lowering. We start with a node of the form: // /--* <arrObj> // +--* <index0> // +--* <index1> // /--* arrMD&[,] // // First, we create temps for arrObj if it is not already a lclVar, and for any of the index // expressions that have side-effects. // We then transform the tree into: // <offset is null - no accumulated offset for the first index> // /--* <arrObj> // +--* <index0> // /--* ArrIndex[i, ] // +--* <arrObj> // /--| arrOffs[i, ] // | +--* <arrObj> // | +--* <index1> // +--* ArrIndex[*,j] // +--* <arrObj> // /--| arrOffs[*,j] // +--* lclVar NewTemp // /--* lea (scale = element size, offset = offset of first element) // // The new stmtExpr may be omitted if the <arrObj> is a lclVar. // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for // the statement containing the original arrMD. // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second // reference to NewTemp), because that provides more accurate lifetimes. // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively. // GenTree* Lowering::LowerArrElem(GenTree* node) { // This will assert if we don't have an ArrElem node GenTreeArrElem* arrElem = node->AsArrElem(); const unsigned char rank = arrElem->gtArrRank; JITDUMP("Lowering ArrElem\n"); JITDUMP("============\n"); DISPTREERANGE(BlockRange(), arrElem); JITDUMP("\n"); assert(arrElem->gtArrObj->TypeGet() == TYP_REF); // We need to have the array object in a lclVar. if (!arrElem->gtArrObj->IsLocal()) { LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem); ReplaceWithLclVar(arrObjUse); } GenTree* arrObjNode = arrElem->gtArrObj; assert(arrObjNode->IsLocal()); GenTree* insertionPoint = arrElem; // The first ArrOffs node will have 0 for the offset of the previous dimension. GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); BlockRange().InsertBefore(insertionPoint, prevArrOffs); GenTree* nextToLower = prevArrOffs; for (unsigned char dim = 0; dim < rank; dim++) { GenTree* indexNode = arrElem->gtArrInds[dim]; // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones. GenTree* idxArrObjNode; if (dim == 0) { idxArrObjNode = arrObjNode; } else { idxArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, idxArrObjNode); } // Next comes the GT_ARR_INDEX node. GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX) GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType); arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrMDIdx); GenTree* offsArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, offsArrObjNode); GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType); arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrOffs); prevArrOffs = arrOffs; } // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the // base. 
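    // For example (illustrative; "length1" here is a stand-in for the length of the
    // second dimension), for a rank-2 "int[,] a" the element a[i, j] lives at
    //
    //     a + eeGetMDArrayDataOffset(2) + (i * length1 + j) * 4
    //
    // where (i * length1 + j) is the value accumulated through the ARR_OFFSET chain
    // above and the LEA contributes the element-size scale and the constant data offset.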
unsigned scale = arrElem->gtArrElemSize; unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank); GenTree* leaIndexNode = prevArrOffs; if (!jitIsScaleIndexMul(scale)) { // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are // TYP_INT GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale); GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode); BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode); leaIndexNode = mulNode; scale = 1; } GenTree* leaBase = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, leaBase); GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset); BlockRange().InsertBefore(insertionPoint, leaNode); LIR::Use arrElemUse; if (BlockRange().TryGetUse(arrElem, &arrElemUse)) { arrElemUse.ReplaceWith(leaNode); } else { leaNode->SetUnusedValue(); } BlockRange().Remove(arrElem); JITDUMP("Results of lowering ArrElem:\n"); DISPTREERANGE(BlockRange(), leaNode); JITDUMP("\n\n"); return nextToLower; } PhaseStatus Lowering::DoPhase() { // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodProlog(); } #if !defined(TARGET_64BIT) DecomposeLongs decomp(comp); // Initialize the long decomposition class. if (comp->compLongUsed) { decomp.PrepareForDecomposition(); } #endif // !defined(TARGET_64BIT) if (!comp->compEnregLocals()) { // Lowering is checking if lvDoNotEnregister is already set for contained optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // `lvDoNotEnregister` flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. comp->lvSetMinOptsDoNotEnreg(); } for (BasicBlock* const block : comp->Blocks()) { /* Make the block publicly available */ comp->compCurBB = block; #if !defined(TARGET_64BIT) if (comp->compLongUsed) { decomp.DecomposeBlock(block); } #endif //! TARGET_64BIT LowerBlock(block); } #ifdef DEBUG JITDUMP("Lower has completed modifying nodes.\n"); if (VERBOSE) { comp->fgDispBasicBlocks(true); } #endif // Recompute local var ref counts before potentially sorting for liveness. // Note this does minimal work in cases where we are not going to sort. const bool isRecompute = true; const bool setSlotNumbers = false; comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); comp->fgLocalVarLiveness(); // local var liveness can delete code, which may create empty blocks if (comp->opts.OptimizationEnabled()) { comp->optLoopsMarked = false; bool modified = comp->fgUpdateFlowGraph(); if (modified) { JITDUMP("had to run another liveness pass:\n"); comp->fgLocalVarLiveness(); } } // Recompute local var ref counts again after liveness to reflect // impact of any dead code removal. Note this may leave us with // tracked vars that have zero refs. 
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef DEBUG //------------------------------------------------------------------------ // Lowering::CheckCallArg: check that a call argument is in an expected // form after lowering. // // Arguments: // arg - the argument to check. // void Lowering::CheckCallArg(GenTree* arg) { if (!arg->IsValue() && !arg->OperIsPutArgStk()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } switch (arg->OperGet()) { case GT_FIELD_LIST: { GenTreeFieldList* list = arg->AsFieldList(); assert(list->isContained()); for (GenTreeFieldList::Use& use : list->Uses()) { assert(use.GetNode()->OperIsPutArg()); } } break; default: assert(arg->OperIsPutArg()); break; } } //------------------------------------------------------------------------ // Lowering::CheckCall: check that a call is in an expected form after // lowering. Currently this amounts to checking its // arguments, but could be expanded to verify more // properties in the future. // // Arguments: // call - the call to check. // void Lowering::CheckCall(GenTreeCall* call) { if (call->gtCallThisArg != nullptr) { CheckCallArg(call->gtCallThisArg->GetNode()); } for (GenTreeCall::Use& use : call->Args()) { CheckCallArg(use.GetNode()); } for (GenTreeCall::Use& use : call->LateArgs()) { CheckCallArg(use.GetNode()); } } //------------------------------------------------------------------------ // Lowering::CheckNode: check that an LIR node is in an expected form // after lowering. // // Arguments: // compiler - the compiler context. // node - the node to check. // void Lowering::CheckNode(Compiler* compiler, GenTree* node) { switch (node->OperGet()) { case GT_CALL: CheckCall(node->AsCall()); break; #ifdef FEATURE_SIMD case GT_SIMD: case GT_HWINTRINSIC: assert(node->TypeGet() != TYP_SIMD12); break; #endif // FEATURE_SIMD case GT_LCL_VAR: case GT_STORE_LCL_VAR: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar()); #if defined(FEATURE_SIMD) && defined(TARGET_64BIT) if (node->TypeIs(TYP_SIMD12)) { assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12)); } #endif // FEATURE_SIMD && TARGET_64BIT if (varDsc->lvPromoted) { assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet); } } break; case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr); if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr()) { // Emitter does not correctly handle live updates for LCL_VAR_ADDR // when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)` // would generate: // add r1, sp, 48 // r1 contains address of a lclVar V01. // str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it. // Make sure that we use uncontained address nodes only for variables // that will be marked as mustInit and will be alive throughout the whole block even when tracked. assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc)); // TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900. 
        }
            assert(varDsc->lvDoNotEnregister);
            break;
        }
        case GT_PHI:
        case GT_PHI_ARG:
            assert(!"Should not see phi nodes after rationalize");
            break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
        {
            const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld());
            assert(varDsc->lvDoNotEnregister);
        }
        break;

        default:
            break;
    }
}

//------------------------------------------------------------------------
// Lowering::CheckBlock: check that the contents of an LIR block are in an
//                       expected form after lowering.
//
// Arguments:
//   compiler - the compiler context.
//   block    - the block to check.
//
bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block)
{
    assert(block->isEmpty() || block->IsLIR());

    LIR::Range& blockRange = LIR::AsRange(block);
    for (GenTree* node : blockRange)
    {
        CheckNode(compiler, node);
    }

    assert(blockRange.CheckLIR(compiler, true));
    return true;
}
#endif

//------------------------------------------------------------------------
// Lowering::LowerBlock: Lower all the nodes in a BasicBlock
//
// Arguments:
//   block    - the block to lower.
//
void Lowering::LowerBlock(BasicBlock* block)
{
    assert(block == comp->compCurBB); // compCurBB must already be set.
    assert(block->isEmpty() || block->IsLIR());

    m_block = block;

    // NOTE: some of the lowering methods insert calls before the node being
    // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In
    // general, any code that is inserted before the current node should be
    // "pre-lowered" as it won't be subject to further processing.
    // Lowering::CheckBlock() runs some extra checks on call arguments in
    // order to help catch unlowered nodes.

    GenTree* node = BlockRange().FirstNode();
    while (node != nullptr)
    {
        node = LowerNode(node);
    }

    assert(CheckBlock(comp, block));
}

/** Verifies if both of these trees represent the same indirection.
 * Used by Lower to annotate whether CodeGen can generate an instruction of the
 * form *addrMode BinOp= expr
 *
 * Preconditions: both trees are children of GT_INDs and their underlying children
 * have the same gtOper.
 *
 * This is a first iteration to actually recognize trees that can be code-generated
 * as a single read-modify-write instruction on AMD64/x86. For now
 * this method only supports the recognition of simple addressing modes (through GT_LEA)
 * or local var indirections. Local fields, array access and other more complex nodes are
 * not yet supported.
 *
 * TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize
 * arbitrary complex trees and support many more addressing patterns.
 */
bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
{
    assert(candidate->OperGet() == GT_IND);
    assert(storeInd->OperGet() == GT_STOREIND);

    // We should check the size of the indirections.  If they are
    // different, say because of a cast, then we can't call them equivalent.  Doing so could cause us
    // to drop a cast.
    // Signed-ness difference is okay and expected since a store indirection must always
    // be signed based on the CIL spec, but a load could be unsigned.
    if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
    {
        return false;
    }

    GenTree* pTreeA = candidate->gtGetOp1();
    GenTree* pTreeB = storeInd->gtGetOp1();

    // This method will be called by codegen (as well as during lowering).
    // After register allocation, the sources may have been spilled and reloaded
    // to a different register, indicated by an inserted GT_RELOAD node.
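    // When the two indirections do match, codegen can fold the load/op/store into a
    // single read-modify-write instruction. Shape being recognized (illustrative, x64):
    //
    //     STOREIND(addr, ADD(IND(addr), 5))   =>   add dword ptr [rax], 5
    //
    // rather than a separate load, add and store.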
pTreeA = pTreeA->gtSkipReloadOrCopy(); pTreeB = pTreeB->gtSkipReloadOrCopy(); genTreeOps oper; if (pTreeA->OperGet() != pTreeB->OperGet()) { return false; } oper = pTreeA->OperGet(); switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_CLS_VAR_ADDR: case GT_CNS_INT: return NodesAreEquivalentLeaves(pTreeA, pTreeB); case GT_LEA: { GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode(); GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode(); return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) && NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) && (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset()); } default: // We don't handle anything that is not either a constant, // a local var or LEA. return false; } } //------------------------------------------------------------------------ // NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves. // // Arguments: // tree1 and tree2 are nodes to be checked. // Return Value: // Returns true if they are same leaves, false otherwise. // // static bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2) { if (tree1 == tree2) { return true; } if (tree1 == nullptr || tree2 == nullptr) { return false; } tree1 = tree1->gtSkipReloadOrCopy(); tree2 = tree2->gtSkipReloadOrCopy(); if (tree1->TypeGet() != tree2->TypeGet()) { return false; } if (tree1->OperGet() != tree2->OperGet()) { return false; } if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf()) { return false; } switch (tree1->OperGet()) { case GT_CNS_INT: return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() && tree1->IsIconHandle() == tree2->IsIconHandle(); case GT_LCL_VAR: case GT_LCL_VAR_ADDR: return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum(); case GT_CLS_VAR_ADDR: return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd; default: return false; } } //------------------------------------------------------------------------ // Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can // remain a multi-reg. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node. // retTypeDesc - a return type descriptor either for a call source of a store of // the local, or for the GT_RETURN consumer of the local. // // Notes: // If retTypeDesc is non-null, this method will check that the fields are compatible. // Otherwise, it will only check that the lclVar is independently promoted // (i.e. it is marked lvPromoted and not lvDoNotEnregister). // bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc) { bool canEnregister = false; #if FEATURE_MULTIREG_RET LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum()); if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted) { // We can enregister if we have a promoted struct and all the fields' types match the ABI requirements. // Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and // if we have multiple types packed into a single register, we won't have matching reg and field counts, // so we can tolerate mismatches of integer size. if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // If we have no retTypeDesc, we only care that it is independently promoted. 
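            // For example (illustrative): on arm64 a promoted "struct { long a; long b; }"
            // returned in x0/x1 has GetReturnRegCount() == 2 and lvFieldCnt == 2, so it can
            // stay multi-reg; a struct of four ints also returns in two 8-byte registers,
            // but its four promoted fields don't match the two registers, so it cannot.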
if (retTypeDesc == nullptr) { canEnregister = true; } else { unsigned regCount = retTypeDesc->GetReturnRegCount(); if (regCount == varDsc->lvFieldCnt) { canEnregister = true; } } } } #ifdef TARGET_XARCH // For local stores on XARCH we only handle mismatched src/dest register count for // calls of SIMD type. If the source was another lclVar similarly promoted, we would // have broken it into multiple stores. if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL)) { canEnregister = false; } #endif // TARGET_XARCH if (canEnregister) { lclNode->SetMultiReg(); } else { lclNode->ClearMultiReg(); if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } #endif return canEnregister; } //------------------------------------------------------------------------ // Containment Analysis //------------------------------------------------------------------------ void Lowering::ContainCheckNode(GenTree* node) { switch (node->gtOper) { case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: ContainCheckStoreLoc(node->AsLclVarCommon()); break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: case GT_JCMP: ContainCheckCompare(node->AsOp()); break; case GT_JTRUE: ContainCheckJTrue(node->AsOp()); break; case GT_ADD: case GT_SUB: #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_AND: case GT_OR: case GT_XOR: ContainCheckBinary(node->AsOp()); break; #if defined(TARGET_X86) case GT_MUL_LONG: #endif case GT_MUL: case GT_MULHI: ContainCheckMul(node->AsOp()); break; case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: ContainCheckDivOrMod(node->AsOp()); break; case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: #endif ContainCheckShiftRotate(node->AsOp()); break; case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; case GT_RETURN: ContainCheckRet(node->AsOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_STOREIND: ContainCheckStoreIndir(node->AsStoreInd()); break; case GT_IND: ContainCheckIndir(node->AsIndir()); break; case GT_PUTARG_REG: case GT_PUTARG_STK: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT // The regNum must have been set by the lowering of the call. assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: ContainCheckSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: ContainCheckHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: break; } } //------------------------------------------------------------------------ // ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained. 
// // Arguments: // node - pointer to the GT_RETURNTRAP node // void Lowering::ContainCheckReturnTrap(GenTreeOp* node) { #ifdef TARGET_XARCH assert(node->OperIs(GT_RETURNTRAP)); // This just turns into a compare of its child with an int + a conditional call if (node->gtOp1->isIndir()) { MakeSrcContained(node, node->gtOp1); } #endif // TARGET_XARCH } //------------------------------------------------------------------------ // ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained. // // Arguments: // node - pointer to the GT_ARR_OFFSET node // void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node) { assert(node->OperIs(GT_ARR_OFFSET)); // we don't want to generate code for this if (node->gtOffset->IsIntegralConst(0)) { MakeSrcContained(node, node->AsArrOffs()->gtOffset); } } //------------------------------------------------------------------------ // ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckLclHeap(GenTreeOp* node) { assert(node->OperIs(GT_LCLHEAP)); GenTree* size = node->AsOp()->gtOp1; if (size->IsCnsIntOrI()) { MakeSrcContained(node, size); } } //------------------------------------------------------------------------ // ContainCheckRet: determine whether the source of a node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckRet(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); #if !defined(TARGET_64BIT) if (ret->TypeGet() == TYP_LONG) { GenTree* op1 = ret->gtGetOp1(); noway_assert(op1->OperGet() == GT_LONG); MakeSrcContained(ret, op1); } #endif // !defined(TARGET_64BIT) #if FEATURE_MULTIREG_RET if (ret->TypeIs(TYP_STRUCT)) { GenTree* op1 = ret->gtGetOp1(); // op1 must be either a lclvar or a multi-reg returning call if (op1->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon()); // This must be a multi-reg return or an HFA of a single element. assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType))); // Mark var as contained if not enregisterable. if (!varDsc->IsEnregisterableLcl()) { if (!op1->IsMultiRegLclVar()) { MakeSrcContained(ret, op1); } } } } #endif // FEATURE_MULTIREG_RET } //------------------------------------------------------------------------ // ContainCheckJTrue: determine whether the source of a JTRUE should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckJTrue(GenTreeOp* node) { // The compare does not need to be generated into a register. GenTree* cmp = node->gtGetOp1(); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; } //------------------------------------------------------------------------ // ContainCheckBitCast: determine whether the source of a BITCAST should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBitCast(GenTree* node) { GenTree* const op1 = node->AsOp()->gtOp1; if (op1->isMemoryOp()) { op1->SetContained(); } else if (op1->OperIs(GT_LCL_VAR)) { if (!m_lsra->willEnregisterLocalVars()) { op1->SetContained(); } const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar()); // TODO-Cleanup: we want to check if the local is already known not // to be on reg, for example, because local enreg is disabled. 
        if (varDsc->lvDoNotEnregister)
        {
            op1->SetContained();
        }
        else
        {
            op1->SetRegOptional();
        }
    }
    else if (op1->IsLocal())
    {
        op1->SetContained();
    }
}

//------------------------------------------------------------------------
// LowerStoreIndirCommon: common logic to lower StoreIndir.
//
// Arguments:
//    ind - the store indirection node we are lowering.
//
void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind)
{
    assert(ind->TypeGet() != TYP_STRUCT);

#if defined(TARGET_ARM64)
    // Verify containment safety before creating an LEA that must be contained.
    //
    const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
    const bool isContainable = true;
#endif
    TryCreateAddrMode(ind->Addr(), isContainable, ind);

    if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind))
    {
        if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl())
        {
            // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller.
            GenTree*  data   = ind->Data();
            double    dblCns = data->AsDblCon()->gtDconVal;
            ssize_t   intCns = 0;
            var_types type   = TYP_UNKNOWN;

            // XARCH: we can always contain the immediates.
            // ARM64: zero can always be contained; other cases will use immediates from the data
            //        section and it is not a clear win to switch them to inline integers.
            // ARM:   FP constants are assembled from integral ones, so it is always profitable
            //        to directly use the integers as it avoids the int -> float conversion.
            CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_XARCH) || defined(TARGET_ARM)
            bool shouldSwitchToInteger = true;
#else // TARGET_ARM64
            bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl();
#endif

            if (shouldSwitchToInteger)
            {
                if (ind->TypeIs(TYP_FLOAT))
                {
                    float fltCns = static_cast<float>(dblCns); // should be a safe round-trip
                    intCns       = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns));
                    type         = TYP_INT;
                }
#ifdef TARGET_64BIT
                else
                {
                    assert(ind->TypeIs(TYP_DOUBLE));
                    intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns));
                    type   = TYP_LONG;
                }
#endif
            }

            if (type != TYP_UNKNOWN)
            {
                data->BashToConst(intCns, type);
                ind->ChangeType(type);
            }
        }

        LowerStoreIndir(ind);
    }
}

//------------------------------------------------------------------------
// LowerIndir: common logic to lower IND load or NullCheck.
//
// Arguments:
//    ind - the ind node we are lowering.
//
void Lowering::LowerIndir(GenTreeIndir* ind)
{
    assert(ind->OperIs(GT_IND, GT_NULLCHECK));
    // Process struct typed indirs separately unless they are unused;
    // they only appear as the source of a block copy operation or a return node.
    if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue())
    {
        // TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
        // address containment in some cases so we end up creating trivial (reg + offset)
        // or (reg + reg) LEAs that are not necessary.
        CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_ARM64)
        // Verify containment safety before creating an LEA that must be contained.
        //
        const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
        const bool isContainable = true;
#endif

        TryCreateAddrMode(ind->Addr(), isContainable, ind);
        ContainCheckIndir(ind);

        if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue())
        {
            TransformUnusedIndirection(ind, comp, m_block);
        }
    }
    else
    {
        // If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR)))`
        // is a complex one it could benefit from an `LEA` that is not contained.
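        // e.g. (illustrative) an address such as [base + index*8 + 0x20] feeding a
        // struct copy: materializing it once into a register via an uncontained LEA
        // can be cheaper than re-forming the full addressing mode at each use.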
const bool isContainable = false; TryCreateAddrMode(ind->Addr(), isContainable, ind); } } //------------------------------------------------------------------------ // TransformUnusedIndirection: change the opcode and the type of the unused indirection. // // Arguments: // ind - Indirection to transform. // comp - Compiler instance. // block - Basic block of the indirection. // void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block) { // A nullcheck is essentially the same as an indirection with no use. // The difference lies in whether a target register must be allocated. // On XARCH we can generate a compare with no target register as long as the address // is not contained. // On ARM64 we can generate a load to REG_ZR in all cases. // However, on ARM we must always generate a load to a register. // In the case where we require a target register, it is better to use GT_IND, since // GT_NULLCHECK is a non-value node and would therefore require an internal register // to use as the target. That is non-optimal because it will be modeled as conflicting // with the source register(s). // So, to summarize: // - On ARM64, always use GT_NULLCHECK for a dead indirection. // - On ARM, always use GT_IND. // - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise. // In all cases we try to preserve the original type and never make it wider to avoid AVEs. // For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT // on XARCH as an optimization. // assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ)); ind->ChangeType(comp->gtTypeForNullCheck(ind)); #ifdef TARGET_ARM64 bool useNullCheck = true; #elif TARGET_ARM bool useNullCheck = false; #else // TARGET_XARCH bool useNullCheck = !ind->Addr()->isContained(); #endif // !TARGET_XARCH if (useNullCheck && !ind->OperIs(GT_NULLCHECK)) { comp->gtChangeOperToNullCheck(ind, block); ind->ClearUnusedValue(); } else if (!useNullCheck && !ind->OperIs(GT_IND)) { ind->ChangeOper(GT_IND); ind->SetUnusedValue(); } } //------------------------------------------------------------------------ // LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK. // // Arguments: // blkNode - the store blk/obj node we are lowering. // void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ)); // Lose the type information stored in the source - we no longer need it. if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK)) { blkNode->Data()->SetOper(GT_IND); LowerIndir(blkNode->Data()->AsIndir()); } if (TryTransformStoreObjAsStoreInd(blkNode)) { return; } LowerBlockStore(blkNode); } //------------------------------------------------------------------------ // TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND. // // Arguments: // blkNode - the store node. // // Return value: // true if the replacement was made, false otherwise. // // Notes: // TODO-CQ: this method should do the transformation when possible // and STOREIND should always generate better or the same code as // STORE_OBJ/BLK for the same copy. 
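//    For example (illustrative): a STORE_OBJ of an 8-byte struct whose layout maps
//    to a single register type becomes STOREIND<TYP_LONG>(addr, src), after which
//    the ordinary store-indirection lowering and containment rules apply.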
//
bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode)
{
    assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
    if (!comp->opts.OptimizationEnabled())
    {
        return false;
    }

    if (blkNode->OperIs(GT_STORE_DYN_BLK))
    {
        return false;
    }

    ClassLayout* layout = blkNode->GetLayout();
    if (layout == nullptr)
    {
        return false;
    }

    var_types regType = layout->GetRegisterType();
    if (regType == TYP_UNDEF)
    {
        return false;
    }

    GenTree* src = blkNode->Data();
    if (varTypeIsSIMD(regType) && src->IsConstInitVal())
    {
        // TODO-CQ: support STORE_IND SIMD16(SIMD16, CNS_INT 0).
        return false;
    }

    if (varTypeIsGC(regType))
    {
        // TODO-CQ: STOREIND does not try to contain src if we need a barrier;
        // STORE_OBJ generates better code currently.
        return false;
    }

    if (src->OperIsInitVal() && !src->IsConstInitVal())
    {
        return false;
    }

    if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal())
    {
        // source operand INDIR will use a widening instruction
        // and generate worse code, like `movzx` instead of `mov`
        // on x64.
        return false;
    }

    JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID);
    blkNode->ChangeOper(GT_STOREIND);
    blkNode->ChangeType(regType);

    if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0)
    {
        blkNode->gtFlags |= GTF_IND_TGTANYWHERE;
    }

    if (varTypeIsStruct(src))
    {
        src->ChangeType(regType);
        LowerNode(blkNode->Data());
    }
    else if (src->OperIsInitVal())
    {
        GenTreeUnOp* initVal = src->AsUnOp();
        src                  = src->gtGetOp1();
        assert(src->IsCnsIntOrI());
        src->AsIntCon()->FixupInitBlkValue(regType);
        blkNode->SetData(src);
        BlockRange().Remove(initVal);
    }
    else
    {
        assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall());
    }

    LowerStoreIndirCommon(blkNode->AsStoreInd());
    return true;
}
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoiding doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoiding doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/unwinder/arm/unwinder_arm.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef __unwinder_arm__ #define __unwinder_arm__ #include "unwinder.h" //--------------------------------------------------------------------------------------- // // See the comment for the base class code:OOPStackUnwinder. // class OOPStackUnwinderArm : public OOPStackUnwinder { public: // Unwind the given CONTEXT to the caller CONTEXT. The CONTEXT will be overwritten. BOOL Unwind(T_CONTEXT * pContext); // // Everything below comes from dbghelp.dll. // protected: HRESULT UnwindPrologue(_In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _In_ DWORD64 FrameBase, _In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, __inout PT_CONTEXT ContextRecord); HRESULT VirtualUnwind(_In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, __inout PT_CONTEXT ContextRecord, _Out_ PDWORD64 EstablisherFrame); DWORD64 LookupPrimaryUnwindInfo (_In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, _In_ DWORD64 ImageBase, _Out_ _PIMAGE_RUNTIME_FUNCTION_ENTRY PrimaryEntry); _PIMAGE_RUNTIME_FUNCTION_ENTRY SameFunction (_In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, _In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _Out_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionReturnBuffer); }; #endif // __unwinder_arm__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef __unwinder_arm__ #define __unwinder_arm__ #include "unwinder.h" //--------------------------------------------------------------------------------------- // // See the comment for the base class code:OOPStackUnwinder. // class OOPStackUnwinderArm : public OOPStackUnwinder { public: // Unwind the given CONTEXT to the caller CONTEXT. The CONTEXT will be overwritten. BOOL Unwind(T_CONTEXT * pContext); // // Everything below comes from dbghelp.dll. // protected: HRESULT UnwindPrologue(_In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _In_ DWORD64 FrameBase, _In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, __inout PT_CONTEXT ContextRecord); HRESULT VirtualUnwind(_In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, __inout PT_CONTEXT ContextRecord, _Out_ PDWORD64 EstablisherFrame); DWORD64 LookupPrimaryUnwindInfo (_In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, _In_ DWORD64 ImageBase, _Out_ _PIMAGE_RUNTIME_FUNCTION_ENTRY PrimaryEntry); _PIMAGE_RUNTIME_FUNCTION_ENTRY SameFunction (_In_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionEntry, _In_ DWORD64 ImageBase, _In_ DWORD64 ControlPc, _Out_ _PIMAGE_RUNTIME_FUNCTION_ENTRY FunctionReturnBuffer); }; #endif // __unwinder_arm__
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoiding doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoiding doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/c_runtime/vfprintf/vfprintf.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: vfprintf.h ** ** Purpose: Contains common testing functions for vfprintf ** ** **==========================================================================*/ #ifndef __vfprintf_H__ #define __vfprintf_H__ inline int DoVfprintf(FILE *fp, const char *format, ...) { int retVal; va_list arglist; va_start(arglist, format); retVal = vfprintf(fp, format, arglist); va_end(arglist); return (retVal); } inline void DoStrTest_vfprintf(const char *formatstr, char* param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert string \"%s\" into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, formatstr, checkstr, buf); } fclose(fp); } #define DoStrTest DoStrTest_vfprintf inline void DoWStrTest_vfprintf(const char *formatstr, WCHAR* param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert wide string \"%S\" into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, formatstr, checkstr, buf); } fclose(fp); } #define DoWStrTest DoWStrTest_vfprintf inline void DoPointerTest_vfprintf(const char *formatstr, void* param, char* paramstr, const char *checkstr1) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", paramstr, formatstr, checkstr1, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoPointerTest DoPointerTest_vfprintf inline void DoCountTest_vfprintf(const char *formatstr, int param, const char *checkstr) { FILE *fp; char buf[512] = { 0 }; int n = -1; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, &n)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, sizeof(buf), fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (n != param) { Fail("ERROR: Expected count parameter to resolve to %d, got %X\n", param, n); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } 
} #define DoCountTest DoCountTest_vfprintf inline void DoShortCountTest_vfprintf(const char *formatstr, int param, const char *checkstr) { FILE *fp; char buf[512] = { 0 }; short int n = -1; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, &n)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (n != param) { Fail("ERROR: Expected count parameter to resolve to %d, got %X\n", param, n); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoShortCountTest DoShortCountTest_vfprintf inline void DoCharTest_vfprintf(const char *formatstr, char param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert char \'%c\' (%d) into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, param, formatstr, checkstr, buf); } fclose(fp); } #define DoCharTest DoCharTest_vfprintf inline void DoWCharTest_vfprintf(const char *formatstr, WCHAR param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert wide char \'%c\' (%d) into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", (char)param, param, formatstr, checkstr, buf); } fclose(fp); } #define DoWCharTest DoWCharTest_vfprintf inline void DoNumTest_vfprintf(const char *formatstr, int value, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert %#x into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", value, formatstr, checkstr, buf); } fclose(fp); } #define DoNumTest DoNumTest_vfprintf inline void DoI64Test_vfprintf(const char *formatstr, INT64 value, char *valuestr, const char *checkstr1) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\", got \"%s\".\n", 
valuestr, formatstr, checkstr1, buf); } fclose(fp); } #define DoI64Test DoI64Test_vfprintf inline void DoDoubleTest_vfprintf(const char *formatstr, double value, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\"\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", value, formatstr, checkstr1, checkstr2, buf); } fclose(fp); } #define DoDoubleTest DoDoubleTest_vfprintf inline void DoArgumentPrecTest_vfprintf(const char *formatstr, int precision, void *param, char *paramstr, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256]; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, precision, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr, precision, checkstr1, checkstr2, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoArgumentPrecTest DoArgumentPrecTest_vfprintf inline void DoArgumentPrecDoubleTest_vfprintf(const char *formatstr, int precision, double param, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256]; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, precision, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr, precision, checkstr1, checkstr2, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_vfprintf #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: vfprintf.h ** ** Purpose: Contains common testing functions for vfprintf ** ** **==========================================================================*/ #ifndef __vfprintf_H__ #define __vfprintf_H__ inline int DoVfprintf(FILE *fp, const char *format, ...) { int retVal; va_list arglist; va_start(arglist, format); retVal = vfprintf(fp, format, arglist); va_end(arglist); return (retVal); } inline void DoStrTest_vfprintf(const char *formatstr, char* param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert string \"%s\" into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, formatstr, checkstr, buf); } fclose(fp); } #define DoStrTest DoStrTest_vfprintf inline void DoWStrTest_vfprintf(const char *formatstr, WCHAR* param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert wide string \"%S\" into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, formatstr, checkstr, buf); } fclose(fp); } #define DoWStrTest DoWStrTest_vfprintf inline void DoPointerTest_vfprintf(const char *formatstr, void* param, char* paramstr, const char *checkstr1) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", paramstr, formatstr, checkstr1, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoPointerTest DoPointerTest_vfprintf inline void DoCountTest_vfprintf(const char *formatstr, int param, const char *checkstr) { FILE *fp; char buf[512] = { 0 }; int n = -1; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, &n)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, sizeof(buf), fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (n != param) { Fail("ERROR: Expected count parameter to resolve to %d, got %X\n", param, n); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } 
} #define DoCountTest DoCountTest_vfprintf inline void DoShortCountTest_vfprintf(const char *formatstr, int param, const char *checkstr) { FILE *fp; char buf[512] = { 0 }; short int n = -1; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, &n)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (n != param) { Fail("ERROR: Expected count parameter to resolve to %d, got %X\n", param, n); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoShortCountTest DoShortCountTest_vfprintf inline void DoCharTest_vfprintf(const char *formatstr, char param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert char \'%c\' (%d) into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", param, param, formatstr, checkstr, buf); } fclose(fp); } #define DoCharTest DoCharTest_vfprintf inline void DoWCharTest_vfprintf(const char *formatstr, WCHAR param, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert wide char \'%c\' (%d) into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", (char)param, param, formatstr, checkstr, buf); } fclose(fp); } #define DoWCharTest DoWCharTest_vfprintf inline void DoNumTest_vfprintf(const char *formatstr, int value, const char *checkstr) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr, strlen(checkstr) + 1) != 0) { Fail("ERROR: failed to insert %#x into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", value, formatstr, checkstr, buf); } fclose(fp); } #define DoNumTest DoNumTest_vfprintf inline void DoI64Test_vfprintf(const char *formatstr, INT64 value, char *valuestr, const char *checkstr1) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\", got \"%s\".\n", 
valuestr, formatstr, checkstr1, buf); } fclose(fp); } #define DoI64Test DoI64Test_vfprintf inline void DoDoubleTest_vfprintf(const char *formatstr, double value, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256] = { 0 }; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, value)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\"\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", value, formatstr, checkstr1, checkstr2, buf); } fclose(fp); } #define DoDoubleTest DoDoubleTest_vfprintf inline void DoArgumentPrecTest_vfprintf(const char *formatstr, int precision, void *param, char *paramstr, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256]; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, precision, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr, precision, checkstr1, checkstr2, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoArgumentPrecTest DoArgumentPrecTest_vfprintf inline void DoArgumentPrecDoubleTest_vfprintf(const char *formatstr, int precision, double param, const char *checkstr1, const char *checkstr2) { FILE *fp; char buf[256]; if ((fp = fopen("testfile.txt", "w+")) == NULL ) { Fail("ERROR: fopen failed to create testfile\n"); } if ((DoVfprintf(fp, formatstr, precision, param)) < 0) { Fail("ERROR: vfprintf failed\n"); } if ((fseek(fp, 0, SEEK_SET)) != 0) { Fail("ERROR: fseek failed\n"); } if ((fgets(buf, 100, fp)) == NULL) { Fail("ERROR: fgets failed\n"); } if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr, precision, checkstr1, checkstr2, buf); } if ((fclose( fp )) != 0) { Fail("ERROR: fclose failed to close \"testfile.txt\"\n"); } } #define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_vfprintf #endif
-1
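To show how the vfprintf.h helpers above are typically driven, here is a minimal sketch of a PAL test entry point. It assumes the palsuite.h harness (PAL_Initialize, PAL_Terminate, Fail, and the PASS/FAIL macros) that these tests normally include; the format strings and expected outputs are made-up examples, not taken from the source.

/* Minimal sketch of a test driver for the vfprintf.h helpers above.
 * Assumes the palsuite.h test harness; the format strings and expected
 * outputs are illustrative only. */
#include <palsuite.h>
#include "vfprintf.h"

int __cdecl main(int argc, char *argv[])
{
    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }

    char strArg[] = "bar";

    /* Each helper writes through DoVfprintf to testfile.txt, seeks back,
     * reads the bytes, and calls Fail() if they differ from checkstr. */
    DoStrTest("foo %s", strArg, "foo bar");
    DoNumTest("foo %d", 42, "foo 42");
    DoCharTest("foo %c", 'b', "foo b");

    PAL_Terminate();
    return PASS;
}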
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/jit/jiteh.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Exception Handling XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _EH_H_ #define _EH_H_ struct BasicBlock; class Compiler; /*****************************************************************************/ // The following holds the table of exception handlers. enum EHHandlerType { EH_HANDLER_CATCH = 0x1, // Don't use zero (to aid debugging uninitialized memory) EH_HANDLER_FILTER, EH_HANDLER_FAULT, EH_HANDLER_FINALLY, EH_HANDLER_FAULT_WAS_FINALLY }; // ToCORINFO_EH_CLAUSE_FLAGS: Convert an internal EHHandlerType to a CORINFO_EH_CLAUSE_FLAGS value // to pass back to the VM. inline CORINFO_EH_CLAUSE_FLAGS ToCORINFO_EH_CLAUSE_FLAGS(EHHandlerType type) { switch (type) { case EH_HANDLER_CATCH: return CORINFO_EH_CLAUSE_NONE; case EH_HANDLER_FILTER: return CORINFO_EH_CLAUSE_FILTER; case EH_HANDLER_FAULT: case EH_HANDLER_FAULT_WAS_FINALLY: return CORINFO_EH_CLAUSE_FAULT; case EH_HANDLER_FINALLY: return CORINFO_EH_CLAUSE_FINALLY; default: unreached(); } } // ToEHHandlerType: Convert a CORINFO_EH_CLAUSE_FLAGS value obtained from the VM in the EH clause structure // to the internal EHHandlerType type. inline EHHandlerType ToEHHandlerType(CORINFO_EH_CLAUSE_FLAGS flags) { if (flags & CORINFO_EH_CLAUSE_FAULT) { return EH_HANDLER_FAULT; } else if (flags & CORINFO_EH_CLAUSE_FINALLY) { return EH_HANDLER_FINALLY; } else if (flags & CORINFO_EH_CLAUSE_FILTER) { return EH_HANDLER_FILTER; } else { // If it's none of the others, assume it is a try/catch. /* XXX Fri 11/7/2008 * The VM (and apparently VC) stick in extra bits in the flags field. We ignore any flags * we don't know about. */ return EH_HANDLER_CATCH; } } struct EHblkDsc { BasicBlock* ebdTryBeg; // First block of the try BasicBlock* ebdTryLast; // Last block of the try BasicBlock* ebdHndBeg; // First block of the handler BasicBlock* ebdHndLast; // Last block of the handler union { BasicBlock* ebdFilter; // First block of filter, if HasFilter() unsigned ebdTyp; // Exception type (a class token), otherwise }; EHHandlerType ebdHandlerType; #if !defined(FEATURE_EH_FUNCLETS) // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, // etc. unsigned short ebdHandlerNestingLevel; #endif // !FEATURE_EH_FUNCLETS static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX; // The index of the enclosing outer try region, NO_ENCLOSING_INDEX if none. // Be careful of 'mutually protect' catch and filter clauses (multiple // handlers with the same try region): the try regions 'nest' so we set // ebdEnclosingTryIndex, but the inner catch is *NOT* nested within the outer catch! // That is, if the "inner catch" throws an exception, it won't be caught by // the "outer catch" for mutually protect handlers. unsigned short ebdEnclosingTryIndex; // The index of the enclosing outer handler region, NO_ENCLOSING_INDEX if none. 
unsigned short ebdEnclosingHndIndex; #if defined(FEATURE_EH_FUNCLETS) // After funclets are created, this is the index of the corresponding FuncInfoDsc // Special case for Filter/Filter-handler: // Like the IL, the filter funclet immediately precedes the filter-handler funclet. // So this index points to the filter-handler funclet. If you want the filter // funclet index, just subtract 1. unsigned short ebdFuncIndex; #endif // FEATURE_EH_FUNCLETS IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported IL_OFFSET ebdTryEndOffset; IL_OFFSET ebdFilterBegOffset; // only set if HasFilter() IL_OFFSET ebdHndBegOffset; IL_OFFSET ebdHndEndOffset; // Returns the last block of the filter. Assumes the EH clause is a try/filter/filter-handler type. BasicBlock* BBFilterLast(); bool HasCatchHandler(); bool HasFilter(); bool HasFinallyHandler(); bool HasFaultHandler(); bool HasFinallyOrFaultHandler(); // Returns the block to which control will flow if an (otherwise-uncaught) exception is raised // in the try. This is normally "ebdHndBeg", unless the try region has a filter, in which case that is returned. // (This is, in some sense, the "true handler," at least in the sense of control flow. Note // that we model the transition from a filter to its handler as normal, non-exceptional control flow.) BasicBlock* ExFlowBlock(); bool InTryRegionILRange(BasicBlock* pBlk); bool InFilterRegionILRange(BasicBlock* pBlk); bool InHndRegionILRange(BasicBlock* pBlk); bool InTryRegionBBRange(BasicBlock* pBlk); bool InFilterRegionBBRange(BasicBlock* pBlk); bool InHndRegionBBRange(BasicBlock* pBlk); IL_OFFSET ebdTryBegOffs(); IL_OFFSET ebdTryEndOffs(); IL_OFFSET ebdFilterBegOffs(); IL_OFFSET ebdFilterEndOffs(); IL_OFFSET ebdHndBegOffs(); IL_OFFSET ebdHndEndOffs(); static bool ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare IL range. // Return the region index of the most nested EH region that encloses this region, or NO_ENCLOSING_INDEX // if this region is directly in the main function body. Set '*inTryRegion' to 'true' if this region is // most nested within a 'try' region, or 'false' if this region is most nested within a handler. (Note // that filters cannot contain nested EH regions.) unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion); static bool ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks. bool ebdIsSameTry(Compiler* comp, unsigned t2); bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); #ifdef DEBUG void DispEntry(unsigned num); // Display this table entry #endif // DEBUG private: static bool InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd); }; /*****************************************************************************/ #endif // _EH_H_ /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Exception Handling XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _EH_H_ #define _EH_H_ struct BasicBlock; class Compiler; /*****************************************************************************/ // The following holds the table of exception handlers. enum EHHandlerType { EH_HANDLER_CATCH = 0x1, // Don't use zero (to aid debugging uninitialized memory) EH_HANDLER_FILTER, EH_HANDLER_FAULT, EH_HANDLER_FINALLY, EH_HANDLER_FAULT_WAS_FINALLY }; // ToCORINFO_EH_CLAUSE_FLAGS: Convert an internal EHHandlerType to a CORINFO_EH_CLAUSE_FLAGS value // to pass back to the VM. inline CORINFO_EH_CLAUSE_FLAGS ToCORINFO_EH_CLAUSE_FLAGS(EHHandlerType type) { switch (type) { case EH_HANDLER_CATCH: return CORINFO_EH_CLAUSE_NONE; case EH_HANDLER_FILTER: return CORINFO_EH_CLAUSE_FILTER; case EH_HANDLER_FAULT: case EH_HANDLER_FAULT_WAS_FINALLY: return CORINFO_EH_CLAUSE_FAULT; case EH_HANDLER_FINALLY: return CORINFO_EH_CLAUSE_FINALLY; default: unreached(); } } // ToEHHandlerType: Convert a CORINFO_EH_CLAUSE_FLAGS value obtained from the VM in the EH clause structure // to the internal EHHandlerType type. inline EHHandlerType ToEHHandlerType(CORINFO_EH_CLAUSE_FLAGS flags) { if (flags & CORINFO_EH_CLAUSE_FAULT) { return EH_HANDLER_FAULT; } else if (flags & CORINFO_EH_CLAUSE_FINALLY) { return EH_HANDLER_FINALLY; } else if (flags & CORINFO_EH_CLAUSE_FILTER) { return EH_HANDLER_FILTER; } else { // If it's none of the others, assume it is a try/catch. /* XXX Fri 11/7/2008 * The VM (and apparently VC) stick in extra bits in the flags field. We ignore any flags * we don't know about. */ return EH_HANDLER_CATCH; } } struct EHblkDsc { BasicBlock* ebdTryBeg; // First block of the try BasicBlock* ebdTryLast; // Last block of the try BasicBlock* ebdHndBeg; // First block of the handler BasicBlock* ebdHndLast; // Last block of the handler union { BasicBlock* ebdFilter; // First block of filter, if HasFilter() unsigned ebdTyp; // Exception type (a class token), otherwise }; EHHandlerType ebdHandlerType; #if !defined(FEATURE_EH_FUNCLETS) // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, // etc. unsigned short ebdHandlerNestingLevel; #endif // !FEATURE_EH_FUNCLETS static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX; // The index of the enclosing outer try region, NO_ENCLOSING_INDEX if none. // Be careful of 'mutually protect' catch and filter clauses (multiple // handlers with the same try region): the try regions 'nest' so we set // ebdEnclosingTryIndex, but the inner catch is *NOT* nested within the outer catch! // That is, if the "inner catch" throws an exception, it won't be caught by // the "outer catch" for mutually protect handlers. unsigned short ebdEnclosingTryIndex; // The index of the enclosing outer handler region, NO_ENCLOSING_INDEX if none. 
unsigned short ebdEnclosingHndIndex; #if defined(FEATURE_EH_FUNCLETS) // After funclets are created, this is the index of the corresponding FuncInfoDsc // Special case for Filter/Filter-handler: // Like the IL, the filter funclet immediately precedes the filter-handler funclet. // So this index points to the filter-handler funclet. If you want the filter // funclet index, just subtract 1. unsigned short ebdFuncIndex; #endif // FEATURE_EH_FUNCLETS IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported IL_OFFSET ebdTryEndOffset; IL_OFFSET ebdFilterBegOffset; // only set if HasFilter() IL_OFFSET ebdHndBegOffset; IL_OFFSET ebdHndEndOffset; // Returns the last block of the filter. Assumes the EH clause is a try/filter/filter-handler type. BasicBlock* BBFilterLast(); bool HasCatchHandler(); bool HasFilter(); bool HasFinallyHandler(); bool HasFaultHandler(); bool HasFinallyOrFaultHandler(); // Returns the block to which control will flow if an (otherwise-uncaught) exception is raised // in the try. This is normally "ebdHndBeg", unless the try region has a filter, in which case that is returned. // (This is, in some sense, the "true handler," at least in the sense of control flow. Note // that we model the transition from a filter to its handler as normal, non-exceptional control flow.) BasicBlock* ExFlowBlock(); bool InTryRegionILRange(BasicBlock* pBlk); bool InFilterRegionILRange(BasicBlock* pBlk); bool InHndRegionILRange(BasicBlock* pBlk); bool InTryRegionBBRange(BasicBlock* pBlk); bool InFilterRegionBBRange(BasicBlock* pBlk); bool InHndRegionBBRange(BasicBlock* pBlk); IL_OFFSET ebdTryBegOffs(); IL_OFFSET ebdTryEndOffs(); IL_OFFSET ebdFilterBegOffs(); IL_OFFSET ebdFilterEndOffs(); IL_OFFSET ebdHndBegOffs(); IL_OFFSET ebdHndEndOffs(); static bool ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare IL range. // Return the region index of the most nested EH region that encloses this region, or NO_ENCLOSING_INDEX // if this region is directly in the main function body. Set '*inTryRegion' to 'true' if this region is // most nested within a 'try' region, or 'false' if this region is most nested within a handler. (Note // that filters cannot contain nested EH regions.) unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion); static bool ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks. bool ebdIsSameTry(Compiler* comp, unsigned t2); bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); #ifdef DEBUG void DispEntry(unsigned num); // Display this table entry #endif // DEBUG private: static bool InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd); }; /*****************************************************************************/ #endif // _EH_H_ /*****************************************************************************/
-1
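As a quick illustration of the two conversion helpers in jiteh.h above (this driver is hypothetical, not part of the file): the plain handler kinds round-trip cleanly through the VM-facing flags, while ToEHHandlerType deliberately ignores extra flag bits and falls back to try/catch.

// Hypothetical round-trip check for the conversion helpers above;
// assumes jiteh.h and the corinfo headers are on the include path.
#include <cassert>

void CheckEHHandlerTypeRoundTrip()
{
    // A finally clause maps to CORINFO_EH_CLAUSE_FINALLY and back.
    CORINFO_EH_CLAUSE_FLAGS flags = ToCORINFO_EH_CLAUSE_FLAGS(EH_HANDLER_FINALLY);
    assert(flags == CORINFO_EH_CLAUSE_FINALLY);
    assert(ToEHHandlerType(flags) == EH_HANDLER_FINALLY);

    // Anything without the FAULT/FINALLY/FILTER bits is treated as a
    // try/catch, including flags with unknown extra bits set.
    assert(ToEHHandlerType(CORINFO_EH_CLAUSE_NONE) == EH_HANDLER_CATCH);

    // Note the one lossy case: EH_HANDLER_FAULT_WAS_FINALLY also maps to
    // CORINFO_EH_CLAUSE_FAULT, which converts back to EH_HANDLER_FAULT.
}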
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/mono/mono/tests/mixed-mode/PureMsvcrtApp/PureMsvcrtApp.cpp
#include <vcclr.h> using namespace System; using namespace System::Runtime::InteropServices; void WriteStringManaged (const wchar_t* str) { Console::WriteLine (Marshal::PtrToStringUni ((IntPtr) (void*) str)); } int main (array<System::String^> ^args) { Console::WriteLine (L"Pure MSVCRT console application"); pin_ptr<const wchar_t> str = PtrToStringChars (L"WriteStringManaged"); WriteStringManaged (str); return 0; }
#include <vcclr.h> using namespace System; using namespace System::Runtime::InteropServices; void WriteStringManaged (const wchar_t* str) { Console::WriteLine (Marshal::PtrToStringUni ((IntPtr) (void*) str)); } int main (array<System::String^> ^args) { Console::WriteLine (L"Pure MSVCRT console application"); pin_ptr<const wchar_t> str = PtrToStringChars (L"WriteStringManaged"); WriteStringManaged (str); return 0; }
-1
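The PureMsvcrtApp sample above pins a string literal before handing it to native-signature code; a hypothetical variant (not in the source) shows the same pin_ptr pattern on an ordinary String^ local, which is the common case when the text is computed at run time.

// Hypothetical variant of the pattern above: pin a managed String^ so
// the GC cannot move its characters while native code reads them.
String^ msg = gcnew String(L"WriteStringManaged");
pin_ptr<const wchar_t> chars = PtrToStringChars(msg);
WriteStringManaged(chars);
// chars unpins automatically when it goes out of scope.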
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is unnecessary. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/vm/fcall.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // FCall.H // // // FCall is a high-performance alternative to ECall. Unlike ECall, FCall // methods do not necessarily create a frame. Jitted code calls directly // to the FCall entry point. It is possible to do operations that need // to have a frame within an FCall; you need to manually set up the frame // before you do such operations. // It is illegal to cause a GC or EH to happen in an FCALL before setting // up a frame. To prevent accidentally violating this rule, FCALLs turn // on BEGINGCFORBID, which ensures that these things can't happen in a // checked build without causing an ASSERTE. Once you set up a frame, // this state is turned off as long as the frame is active, and then is // turned on again when the frame is torn down. This mechanism should // be sufficient to ensure that the rules are followed. // In general you set up a frame by using the following macros: // HELPER_METHOD_FRAME_BEGIN_RET*() // Use if the FCALL has a return value // HELPER_METHOD_FRAME_BEGIN*() // Use if the FCALL does not return a value // HELPER_METHOD_FRAME_END*() // These macros introduce a scope which is protected by a HelperMethodFrame. // In this scope you can do EH or GC. There are rules associated with // their use. In particular // 1) These macros can only be used in the body of an FCALL (that is, // something using the FCIMPL* or HCIMPL* macros for its declaration). // 2) You may not perform a 'return' within this scope. // Compile time errors occur if you try to violate either of these rules. // The frame that is set up does NOT protect any GC variables (in particular the // arguments of the FCALL). Thus you need to do an explicit GCPROTECT once the // frame is established if you need to protect an argument. There are flavors // of HELPER_METHOD_FRAME that protect a certain number of GC variables. For // example // HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) // will protect the GC variables arg1 and arg2, as well as erecting the frame. // Another invariant that you must be aware of is the need to poll to see if // a GC is needed by some other thread. Unless the FCALL is VERY short, // every code path through the FCALL must do such a poll. The important // thing here is that a poll will cause a GC, and thus you can only do it // when all your GC variables are protected. To make things easier, // HELPER_METHOD_FRAMES that protect things automatically do this poll. // If you don't need to protect anything, HELPER_METHOD_FRAME_BEGIN_0 // will also do the poll. // Sometimes it is convenient to do the poll at the end of the frame; you // can use HELPER_METHOD_FRAME_BEGIN_NOPOLL and HELPER_METHOD_FRAME_END_POLL // to do the poll at the end. If somewhere in the middle is the best // place, you can do that too with HELPER_METHOD_POLL(). // You don't need to erect a helper method frame to do a poll. FC_GC_POLL // can do this (remember all your GC refs will be trashed). // Finally, if your method is VERY small, you can get away without a poll; // you have to use FC_GC_POLL_NOT_NEEDED to mark this. // Use sparingly! // It is possible to set up the frame as the first operation in the FCALL and // tear it down as the last operation before returning. This works and is // reasonably efficient (as good as an ECall); however, if you can defer the // setup of the frame to an unlikely code path (the exception path), that is // much better.
// If you defer setup of the frame, all codepaths leading to the frame setup // must be wrapped with PERMIT_HELPER_METHOD_FRAME_BEGIN/END. These block // certain compiler optimizations that interfere with the delayed frame setup. // These macros are automatically included in the HCIMPL, FCIMPL, and frame // setup macros. // <TODO>TODO: we should have a way of doing a trial allocation (an allocation that // will fail if it would cause a GC). That way even FCALLs that need to allocate // would not necessarily need to set up a frame. </TODO> // It is common to only need to set up a frame in order to throw an exception. // While this can be done by doing // HELPER_METHOD_FRAME_BEGIN() // Use if the FCALL does not return a value // COMPlusThrow(except); // HELPER_METHOD_FRAME_END() // it is more efficient (in space) to use the convenience macro FCTHROW that does // this for you (sets up a frame, and does the throw). // FCTHROW(except) // Since FCALLs have to conform to the EE calling conventions and not to C // calling conventions, FCALLs need to be declared using special macros (FCIMPL*) // that implement the correct calling conventions. There are variants of these // macros depending on the number of args, and sometimes the types of the // arguments. //------------------------------------------------------------------------ // A very simple example: // // FCIMPL2(INT32, Div, INT32 x, INT32 y) // { // if (y == 0) // FCThrow(kDivideByZeroException); // return x/y; // } // FCIMPLEND // // // *** WATCH OUT FOR THESE GOTCHAS: *** // ------------------------------------ // - In your FCDECL & FCIMPL protos, don't declare a param as type OBJECTREF // or any of its derived types. This will break on the checked build because // __fastcall doesn't enregister C++ objects (which OBJECTREF is). // Instead, you need to do something like: // // FCIMPL(.., .., Object* pObject0) // OBJECTREF pObject = ObjectToOBJECTREF(pObject0); // FCIMPL // // For similar reasons, use Object* rather than OBJECTREF as a return type. // Consider either using ObjectToOBJECTREF or calling VALIDATEOBJECTREF // to make sure your Object* is valid. // // - FCThrow() must be called directly from your FCall impl function: it // cannot be called from a subfunction. Calling from a subfunction breaks // the VC code parsing workaround that lets us recover the callee saved registers. // Fortunately, you'll get a compile error complaining about an // unknown variable "__me". // // - If your FCall returns VOID, you must use FCThrowVoid() rather than // FCThrow(). This is because FCThrow() has to generate an unexecuted // "return" statement for the code parser. // // - On x86, if the first and/or second argument of your FCall cannot be passed // in either of the __fastcall registers (ECX/EDX), you must use "V" versions // of the FCDECL and FCIMPL macros to enregister arguments correctly. Some of the // most common types that fit this requirement are 64-bit values (i.e. INT64 or // UINT64) and floating-point values (i.e. FLOAT or DOUBLE). For example, FCDECL3_IVI // must be used for FCalls that take 3 arguments whose 2nd argument is INT64, and // FCDECL2_VV must be used for FCalls that take 2 arguments where both are FLOAT. // // - You may use structs for protecting multiple OBJECTREF's simultaneously. // In these cases, you must use a variant of a helper method frame with PROTECT // in the name, to ensure all the OBJECTREF's in the struct get protected. // Also, initialize all the OBJECTREF's first.
Like this: // // FCIMPL4(Object*, COMNlsInfo::nativeChangeCaseString, LocaleIDObject* localeUNSAFE, // INT_PTR pNativeTextInfo, StringObject* pStringUNSAFE, CLR_BOOL bIsToUpper) // { // [ignoring CONTRACT for now] // struct _gc // { // STRINGREF pResult; // STRINGREF pString; // LOCALEIDREF pLocale; // } gc; // gc.pResult = NULL; // gc.pString = ObjectToSTRINGREF(pStringUNSAFE); // gc.pLocale = (LOCALEIDREF)ObjectToOBJECTREF(localeUNSAFE); // // HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) // // If you forget the PROTECT part, the macro will only protect the first OBJECTREF, // introducing a subtle GC hole in your code. Fortunately, we now issue a // compile-time error if you forget. // How FCall works: // ---------------- // An FCall target uses __fastcall or some other calling convention to // match the IL calling convention exactly. Thus, a call to an FCall is a direct // call to the target w/ no intervening stub or frame. // // The tricky part is when FCThrow is called. FCThrow must generate // a proper method frame before allocating and throwing the exception. // To do this, it must recover several things: // // - The location of the FCIMPL's return address (since that's // where the frame will be based.) // // - The on-entry values of the callee-saved regs, which must // be recorded in the frame so that GC can update them. // Depending on how VC compiles your FCIMPL, those values are still // in the original registers or saved on the stack. // // To figure out which, FCThrow() generates the code: // // while (NULL == __FCThrow(__me, ...)) {}; // return 0; // // The "return" statement will never execute, but its presence guarantees // that VC will follow the __FCThrow() call with a VC epilog // that restores the callee-saved registers using a pretty small // and predictable set of Intel opcodes. __FCThrow() parses this // epilog and simulates its execution to recover the callee saved // registers. // // The while loop is to prevent the compiler from doing tail call optimizations. // The helper frame interpreter needs the frame to be present. // // - The MethodDesc* that this FCall implements. This MethodDesc* // is part of the frame and ensures that the FCall will appear // in the exception's stack trace. To get this, FCDECL declares // a static local __me, initialized to point to the FC target itself. // This address is exactly what's stored in the ECall lookup tables, // so __FCThrow() simply does a reverse lookup on that table to recover // the MethodDesc*. // #ifndef __FCall_h__ #define __FCall_h__ #include "gms.h" #include "runtimeexceptionkind.h" #include "debugreturn.h" //============================================================================================== // These macros defeat compiler optimizations that might mix nonvolatile // register loads and stores with other code in the function body. This // creates problems for the frame setup code, which assumes that any // nonvolatiles that are saved at the point of the frame setup will be // re-loaded when the frame is popped. // // Currently this is only known to be an issue on AMD64. It's uncertain // whether it is an issue on x86. //============================================================================================== #if defined(TARGET_AMD64) && !defined(TARGET_UNIX) // // On AMD64 this is accomplished by including a setjmp anywhere in a function. // It doesn't matter whether it is reachable or not, and in fact in optimized // builds the setjmp is removed altogether.
// #include <setjmp.h> // // Use of setjmp is temporary; we will eventually have compiler intrinsics to // disable the optimizations. Besides, we don't actually execute setjmp in // these macros (or anywhere else in the VM on AMD64). // #pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable #ifdef _DEBUG // // Linked list of unmanaged methods preceding a HelperMethodFrame push. This // is linked onto the current Thread. Each list entry is stack-allocated so it // can be associated with an unmanaged frame. Each unmanaged frame needs to be // associated with at least one list entry. // struct HelperMethodFrameCallerList { HelperMethodFrameCallerList *pCaller; }; #endif // _DEBUG // // Resets the Thread state at a new managed -> fcall transition. // class FCallTransitionState { public: FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList *m_pPreviousHelperMethodFrameCallerList; #endif // _DEBUG }; // // Pushes/pops state for each caller. // class PermitHelperMethodFrameState { public: PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); static VOID CheckHelperMethodFramePermitted () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList m_ListEntry; #endif // _DEBUG }; // // Resets the Thread state after the HelperMethodFrame is pushed. At this // point, the HelperMethodFrame is capable of unwinding to the managed code, // so we can reset the Thread state for any nested fcalls. // class CompletedFCallTransitionState { public: CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: HelperMethodFrameCallerList *m_pLastHelperMethodFrameCallerList; #endif // _DEBUG }; #define PERMIT_HELPER_METHOD_FRAME_BEGIN() \ if (1) \ { \ PermitHelperMethodFrameState ___PermitHelperMethodFrameState; #define PERMIT_HELPER_METHOD_FRAME_END() \ } \ else \ { \ jmp_buf ___jmpbuf; \ setjmp(___jmpbuf); \ __assume(0); \ } #define FCALL_TRANSITION_BEGIN() \ FCallTransitionState ___FCallTransitionState; \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); #define FCALL_TRANSITION_END() \ PERMIT_HELPER_METHOD_FRAME_END(); #define CHECK_HELPER_METHOD_FRAME_PERMITTED() \ PermitHelperMethodFrameState::CheckHelperMethodFramePermitted(); \ CompletedFCallTransitionState ___CompletedFCallTransitionState; #else // unsupported processor #define PERMIT_HELPER_METHOD_FRAME_BEGIN() #define PERMIT_HELPER_METHOD_FRAME_END() #define FCALL_TRANSITION_BEGIN() #define FCALL_TRANSITION_END() #define CHECK_HELPER_METHOD_FRAME_PERMITTED() #endif // unsupported processor //============================================================================================== // This is where FCThrow ultimately ends up. Never call this directly. // Use the FCThrow() macros. __FCThrowArgument is the helper to throw ArgumentExceptions // with a resource taken from the managed resource manager.
//============================================================================================== LPVOID __FCThrow(LPVOID me, enum RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3); LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR argumentName, LPCWSTR resourceName); //============================================================================================== // FDECLn: A set of macros for generating header declarations for FC targets. // Use FIMPLn for the actual body. //============================================================================================== // Note: on the x86, these defs reverse all but the first two arguments // (IL stack calling convention is reversed from __fastcall.) // Calling convention for varargs #define F_CALL_VA_CONV __cdecl #ifdef TARGET_X86 // Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention #ifdef __GNUC__ #define F_CALL_CONV __attribute__((cdecl, regparm(3))) // GCC FCALL convention (simulated via cdecl, regparm(3)) is different from MSVC FCALL convention. GCC can use up // to 3 registers to store parameters. The registers used are EAX, EDX, ECX. Dummy parameters and reordering // of the actual parameters in the FCALL signature is used to make the calling convention to look like in MSVC. #define SWIZZLE_REGARG_ORDER #else // __GNUC__ #define F_CALL_CONV __fastcall #endif // !__GNUC__ #define SWIZZLE_STKARG_ORDER #else // TARGET_X86 // // non-x86 platforms don't have messed-up calling convention swizzling // #define F_CALL_CONV #endif // !TARGET_X86 #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) #else // SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a3, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a1, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) #endif // !SWIZZLE_REGARG_ORDER #if 0 // // don't use something like this... 
directly calling an FCALL from within the runtime breaks stackwalking because // the FCALL reverse mapping only gets established in ECall::GetFCallImpl and that codepath is circumvented by // directly calling an FCALL // See below for usage of FC_CALL_INNER (used in SecurityStackWalk::Check presently) // #define FCCALL0(funcname) funcname() #define FCCALL1(funcname, a1) funcname(a1) #define FCCALL2(funcname, a1, a2) funcname(a1, a2) #define FCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define FCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define FCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define FCCALL6(funcname, a1, a2, a3, a4, a5, a6) funcname(a1, a2, a6, a5, a4, a3) #define FCCALL7(funcname, a1, a2, a3, a4, a5, a6, a7) funcname(a1, a2, a7, a6, a5, a4, a3) #define FCCALL8(funcname, a1, a2, a3, a4, a5, a6, a7, a8) funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCCALL9(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCCALL10(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL11(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL12(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #endif // 0 #else // !SWIZZLE_STKARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2, ...)
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #endif // !SWIZZLE_STKARG_ORDER #define HELPER_FRAME_DECL(x) FrameWithCookie<HelperMethodFrame_##x##OBJ> __helperframe // use the capture state machinery if the architecture has one // // For a normal build we create a loop (see explanation on RestoreState below). // We don't want a loop here for PREFAST since that causes // warning 263: Using _alloca in a loop // And we can't use DEBUG_OK_TO_RETURN for PREFAST because the PREFAST version // requires that you already be in a DEBUG_ASSURE_NO_RETURN_BEGIN scope #define HelperMethodFrame_0OBJ HelperMethodFrame #define HELPER_FRAME_ARGS(attribs) __me, attribs #define FORLAZYMACHSTATE(x) x #if defined(_PREFAST_) #define FORLAZYMACHSTATE_BEGINLOOP(x) x #define FORLAZYMACHSTATE_ENDLOOP(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END #else #define FORLAZYMACHSTATE_BEGINLOOP(x) x do #define 
FORLAZYMACHSTATE_ENDLOOP(x) while(x)
#define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN DEBUG_OK_TO_RETURN_BEGIN(LAZYMACHSTATE)
#define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END DEBUG_OK_TO_RETURN_END(LAZYMACHSTATE)
#endif

// BEGIN: before gcpoll
//FCallGCCanTriggerNoDtor __fcallGcCanTrigger;
//__fcallGcCanTrigger.Enter();

// END: after gcpoll
//__fcallGcCanTrigger.Leave(__FUNCTION__, __FILE__, __LINE__);

// We have to put DEBUG_OK_TO_RETURN_BEGIN around the FORLAZYMACHSTATE
// to allow the HELPER_FRAME to be installed inside an SO_INTOLERANT region
// which does not allow a return. The return is used by FORLAZYMACHSTATE
// to capture the state, but is not an actual return, so it is ok.
#define HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
    FORLAZYMACHSTATE_BEGINLOOP(int alwaysZero = 0;) \
    { \
        INDEBUG(static BOOL __haveCheckedRestoreState = FALSE;) \
        PERMIT_HELPER_METHOD_FRAME_BEGIN(); \
        CHECK_HELPER_METHOD_FRAME_PERMITTED(); \
        helperFrame; \
        FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN; \
        FORLAZYMACHSTATE(CAPTURE_STATE(__helperframe.MachineState(), ret);) \
        FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END; \
        INDEBUG(__helperframe.SetAddrOfHaveCheckedRestoreState(&__haveCheckedRestoreState)); \
        DEBUG_ASSURE_NO_RETURN_BEGIN(HELPER_METHOD_FRAME); \
        INCONTRACT(FCallGCCanTrigger::Enter());

#define HELPER_METHOD_FRAME_BEGIN_EX(ret, helperFrame, gcpoll, allowGC) \
    HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
    /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
    /* gcpoll; */ \
    INSTALL_MANAGED_EXCEPTION_DISPATCHER; \
    __helperframe.Push(); \
    MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \
    INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe);

#define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \
    HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
    __helperframe.Push(); \
    MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \
    /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
    /* gcpoll; */

// The while(__helperframe.RestoreState()) needs a bit of explanation.
// The issue is ensuring that the same machine state (i.e., which registers are
// saved) exists when the machine state is probed (when the frame is created) and
// when it is actually used (when the frame is popped). We do this by creating
// a flow of control from use to def. Note that 'RestoreState' always returns false;
// we never actually loop, but the compiler does not know that, and thus
// will be forced to keep the state of register spills the same at
// the two locations.
#define HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC) \
    /* <TODO>TODO TURN THIS ON!!!
</TODO> */ \ /* gcpoll; */ \ DEBUG_ASSURE_NO_RETURN_END(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)); \ FORLAZYMACHSTATE(alwaysZero = \ HelperMethodFrameRestoreState(INDEBUG_COMMA(&__helperframe) \ __helperframe.MachineState());) \ PERMIT_HELPER_METHOD_FRAME_END() \ } FORLAZYMACHSTATE_ENDLOOP(alwaysZero); #define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \ __helperframe.Pop(); \ UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \ __helperframe.Pop(); \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_0() \ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_NOPOLL() HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_1(arg1) HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_2(arg1, arg2) HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(attribs, arg1, arg2, arg3) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg3) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(3)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2, (OBJECTREF*) &arg3), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_3(arg1, arg2, arg3) HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(Frame::FRAME_ATTR_NONE, arg1, arg2, arg3) #define HELPER_METHOD_FRAME_BEGIN_PROTECT(gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(attribs) \ 
HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_0() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_0() \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1(probeFailExpr, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NO_THREAD_ABORT), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(), TRUE, probeFailExpr) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(attribs, gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define 
HELPER_METHOD_FRAME_BEGIN_RET_1(arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_1(arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)

#define HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_2(arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) \
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_NONE, gc)

#define HELPER_METHOD_FRAME_END()         HELPER_METHOD_FRAME_END_EX({},FALSE)
#define HELPER_METHOD_FRAME_END_POLL()    HELPER_METHOD_FRAME_END_EX(HELPER_METHOD_POLL(),TRUE)
#define HELPER_METHOD_FRAME_END_NOTHROW() HELPER_METHOD_FRAME_END_EX_NOTHROW({},FALSE)

// This is the fastest way to do a GC poll if you have already erected a HelperMethodFrame
#define HELPER_METHOD_POLL() { __helperframe.Poll(); INCONTRACT(__fCallCheck.SetDidPoll()); }

// The HelperMethodFrame knows how to get its return address. Let other code get at it, too.
// (Uses comma operator to call InsureInit & discard result.)
#define HELPER_METHOD_FRAME_GET_RETURN_ADDRESS() \
    ( static_cast<UINT_PTR>( (__helperframe.InsureInit(false, NULL)), (__helperframe.MachineState()->GetRetAddr()) ) )

// Very short routines, or routines that are guaranteed to force GC or EH
// don't need to poll the GC. USE VERY SPARINGLY!!!
#define FC_GC_POLL_NOT_NEEDED() INCONTRACT(__fCallCheck.SetNotNeeded())

Object* FC_GCPoll(void* me, Object* objToProtect = NULL);

#define FC_GC_POLL_EX(ret) \
    { \
        INCONTRACT(Thread::TriggersGC(GetThread());) \
        INCONTRACT(__fCallCheck.SetDidPoll();) \
        if (g_TrapReturningThreads.LoadWithoutBarrier()) \
        { \
            if (FC_GCPoll(__me)) \
                return ret; \
            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
        } \
    }

#define FC_GC_POLL()        FC_GC_POLL_EX(;)
#define FC_GC_POLL_RET()    FC_GC_POLL_EX(0)

#define FC_GC_POLL_AND_RETURN_OBJREF(obj) \
    { \
        INCONTRACT(__fCallCheck.SetDidPoll();) \
        Object* __temp = OBJECTREFToObject(obj); \
        if (g_TrapReturningThreads.LoadWithoutBarrier()) \
        { \
            __temp = FC_GCPoll(__me, __temp); \
            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
        } \
        return __temp; \
    }

#if defined(ENABLE_CONTRACTS)
#define FC_CAN_TRIGGER_GC()         FCallGCCanTrigger::Enter()
#define FC_CAN_TRIGGER_GC_END()     FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)

#define FC_CAN_TRIGGER_GC_HAVE_THREAD(thread)    FCallGCCanTrigger::Enter(thread)
#define FC_CAN_TRIGGER_GC_HAVE_THREADEND(thread) FCallGCCanTrigger::Leave(thread, __FUNCTION__, __FILE__, __LINE__)

// turns on forbidGC for the lifetime of the instance
class ForbidGC
{
protected:
    Thread *m_pThread;
public:
    ForbidGC(const char *szFile, int lineNum);
    ~ForbidGC();
};

// this little helper class checks to make certain
//  1) ForbidGC is set throughout the routine.
//  2) Sometime during the routine, a GC poll is done
class FCallCheck : public ForbidGC
{
public:
    FCallCheck(const char *szFile, int lineNum);
    ~FCallCheck();
    void SetDidPoll()   {LIMITED_METHOD_CONTRACT; didGCPoll = true; }
    void SetNotNeeded() {LIMITED_METHOD_CONTRACT; notNeeded = true; }

private:
#ifdef _DEBUG
    DWORD unbreakableLockCount;
#endif
    bool didGCPoll;                 // GC poll was done
    bool notNeeded;                 // GC poll not needed
    unsigned __int64 startTicks;    // tick count at beginning of FCall
};

// FC_COMMON_PROLOG is used for both FCalls and HCalls
#define FC_COMMON_PROLOG(target, assertFn) \
    /* The following line has to be first. We do not want to trash last error */ \
    DWORD __lastError = ::GetLastError(); \
    static void* __cache = 0; \
    assertFn(__cache, (LPVOID)target); \
    { \
        Thread *_pThread = GetThread(); \
        Thread::ObjectRefFlush(_pThread); \
    } \
    FCallCheck __fCallCheck(__FILE__, __LINE__); \
    FCALL_TRANSITION_BEGIN(); \
    ::SetLastError(__lastError); \

void FCallAssert(void*& cache, void* target);
void HCallAssert(void*& cache, void* target);

#else
#define FC_COMMON_PROLOG(target, assertFn) FCALL_TRANSITION_BEGIN()
#define FC_CAN_TRIGGER_GC()
#define FC_CAN_TRIGGER_GC_END()
#endif // ENABLE_CONTRACTS

// #FC_INNER
// Macros that allow an fcall to be split into two functions to avoid the helper frame overhead on common fast
// codepaths.
//
// The helper routine needs to know the name of the routine that called it so that it can look up the name of
// the managed routine this code is associated with (for managed stack traces). This is passed with the
// FC_INNER_PROLOG macro.
//
// The helper can set up a HELPER_METHOD_FRAME, but should pass the
// Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2 which indicates the exact number of
// unwinds to do to get back to managed code. Currently we only support depth 2, which means that the
// HELPER_METHOD_FRAME needs to be set up in the function directly called by the FCALL.
The helper should
// use the NOINLINE macro to prevent the compiler from inlining it into the FCALL (which would obviously
// mess up the unwind count).
//
// The other invariant that needs to hold is that the epilog walker needs to be able to get from the call to
// the helper routine to the end of the FCALL using trivial heuristics. The easiest (and only supported)
// way of doing this is to place your helper right before a return (eg at the end of the method). Generally
// this is not a problem at all, since the FCALL itself will pick off some common case and then tail-call to
// the helper for everything else. You must use the code:FC_INNER_RETURN macros to do the call, to ensure
// that the C++ compiler does not tail-call optimize the call to the inner function and mess up the stack
// depth.
//
// see code:ObjectNative::GetClass for an example
//
#define FC_INNER_PROLOG(outerfuncname) \
    LPVOID __me; \
    __me = GetEEFuncEntryPointMacro(outerfuncname); \
    FC_CAN_TRIGGER_GC(); \
    INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));

// This variant should be used for inner fcall functions that have the
// __me value passed as an argument to the function. This allows
// inner functions to be shared across multiple fcalls.
#define FC_INNER_PROLOG_NO_ME_SETUP() \
    FC_CAN_TRIGGER_GC(); \
    INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));

#define FC_INNER_EPILOG() \
    FC_CAN_TRIGGER_GC_END();

// If you are using FC_INNER, and you are tail calling to the helper method (a common case), then you need
// to use the FC_INNER_RETURN macros (there is one for methods that return a value and another if the
// function returns void). This macro's purpose is to inhibit any tail call optimization the C++ compiler
// might do, which would otherwise confuse the epilog walker.
//
// * See #FC_INNER for more
extern RAW_KEYWORD(volatile) int FC_NO_TAILCALL;
#define FC_INNER_RETURN(type, expr) \
    type __retVal = expr; \
    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
    return(__retVal);

#define FC_INNER_RETURN_VOID(stmt) \
    stmt; \
    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
    return;

//==============================================================================================
// FCIMPLn: A set of macros for generating the proto for the actual
// implementation (use FCDECLn for header protos.)
//
// The hidden "__me" variable lets us recover the original MethodDesc*
// so any thrown exceptions will have the correct stack trace. FCThrow()
// passes this along to __FCThrowInternal().
//==============================================================================================

#define GetEEFuncEntryPointMacro(func)  ((LPVOID)(func))

#define FCIMPL_PROLOG(funcname) \
    LPVOID __me; \
    __me = GetEEFuncEntryPointMacro(funcname); \
    FC_COMMON_PROLOG(__me, FCallAssert)

#if defined(_DEBUG) && !defined(__GNUC__)
// Build the list of all fcall signatures. It is used in binder.cpp to verify
// compatibility of managed and unmanaged fcall signatures. The check is currently done
// for x86 only.
#define CHECK_FCALL_SIGNATURE #endif #ifdef CHECK_FCALL_SIGNATURE struct FCSigCheck { public: FCSigCheck(void* fnc, const char* sig) { LIMITED_METHOD_CONTRACT; func = fnc; signature = sig; next = g_pFCSigCheck; g_pFCSigCheck = this; } FCSigCheck* next; void* func; const char* signature; static FCSigCheck* g_pFCSigCheck; }; #define FCSIGCHECK(funcname, signature) \ static FCSigCheck UNIQUE_LABEL(FCSigCheck)(GetEEFuncEntryPointMacro(funcname), signature); #else // CHECK_FCALL_SIGNATURE #define FCSIGCHECK(funcname, signature) #endif // !CHECK_FCALL_SIGNATURE #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype 
F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) { FCIMPL_PROLOG(funcname) #else // SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) FCSIGCHECK(funcname, #rettype) \ rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," "V" #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "...") \ rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a3, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4) \ rettype F_CALL_CONV funcname(a1, a2, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6) \ rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7) \ rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8) \ rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9) \ rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10) \ rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 
"," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11) \ rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12) \ rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13) \ rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13 "," #a14) \ rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) { FCIMPL_PROLOG(funcname) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define FCIMPL0(rettype, funcname) rettype funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname)
#define FCIMPL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4) { FCIMPL_PROLOG(funcname)
#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6) { FCIMPL_PROLOG(funcname)
#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7) { FCIMPL_PROLOG(funcname)
#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8) { FCIMPL_PROLOG(funcname)
#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) { FCIMPL_PROLOG(funcname)
#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { FCIMPL_PROLOG(funcname)
#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) { FCIMPL_PROLOG(funcname)
#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) { FCIMPL_PROLOG(funcname)
#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) { FCIMPL_PROLOG(funcname)
#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) { FCIMPL_PROLOG(funcname)

#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)

#endif // !SWIZZLE_STKARG_ORDER

//==============================================================================================
// Use this to terminate an FCIMPL.
//==============================================================================================
#define FCIMPL_EPILOG()   FCALL_TRANSITION_END()

#define FCIMPLEND   FCIMPL_EPILOG(); }

#define HCIMPL_PROLOG(funcname) LPVOID __me; __me = 0; FC_COMMON_PROLOG(funcname, HCallAssert)

// HCIMPL macros are just like their FCIMPL counterparts, however
// they do not remember the function they come from.
Thus they will not // show up in a stack trace. This is what you want for JIT helpers and the like #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(0, 0, a1) #define HCCALL1_V(funcname, a1) funcname(0, 0, 0, a1) #define HCCALL2(funcname, a1, a2) funcname(0, a2, a1) #define HCCALL3(funcname, a1, a2, a3) funcname(0, a2, a1, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(0, a2, a1, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(0, a2, a1, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(int /* EAX */, int /* EDX */, a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(int /* EAX */, a2, a1) #else // SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define 
HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a3, a4) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a3, a4, a5) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_STKARG_ORDER #define HCIMPLEND_RAW } #define HCIMPLEND FCALL_TRANSITION_END(); } //============================================================================================== // Throws an exception from an FCall. See rexcep.h for a list of valid // exception codes. //============================================================================================== #define FCThrow(reKind) FCThrowEx(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowEx(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return 0; \ } //============================================================================================== // Like FCThrow but can be used for a VOID-returning FCall. The only // difference is in the "return" statement. 
//==============================================================================================
#define FCThrowVoid(reKind)   FCThrowExVoid(reKind, 0, 0, 0, 0)

//==============================================================================================
// This version lets you attach a message with inserts (similar to
// COMPlusThrow()).
//==============================================================================================
#define FCThrowExVoid(reKind, resID, arg1, arg2, arg3) \
    { \
        while (NULL == \
            __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \
        return; \
    }

// Use FCThrowRes to throw an exception with a localized error message from the
// ResourceManager in managed code.
#define FCThrowRes(reKind, resourceName) FCThrowArgumentEx(reKind, NULL, resourceName)
#define FCThrowArgumentNull(argName) FCThrowArgumentEx(kArgumentNullException, argName, NULL)
#define FCThrowArgumentOutOfRange(argName, message) FCThrowArgumentEx(kArgumentOutOfRangeException, argName, message)
#define FCThrowArgument(argName, message) FCThrowArgumentEx(kArgumentException, argName, message)

#define FCThrowArgumentEx(reKind, argName, resourceName) \
    { \
        while (NULL == \
            __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
        return 0; \
    }

// Use FCThrowRes to throw an exception with a localized error message from the
// ResourceManager in managed code.
#define FCThrowResVoid(reKind, resourceName) FCThrowArgumentVoidEx(reKind, NULL, resourceName)
#define FCThrowArgumentNullVoid(argName) FCThrowArgumentVoidEx(kArgumentNullException, argName, NULL)
#define FCThrowArgumentOutOfRangeVoid(argName, message) FCThrowArgumentVoidEx(kArgumentOutOfRangeException, argName, message)
#define FCThrowArgumentVoid(argName, message) FCThrowArgumentVoidEx(kArgumentException, argName, message)

#define FCThrowArgumentVoidEx(reKind, argName, resourceName) \
    { \
        while (NULL == \
            __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
        return; \
    }

// The x86 JIT calling convention expects returned small types (e.g. bool) to be
// widened on return. The C/C++ calling convention does not guarantee returned
// small types to be widened. The small types have to be artificially widened on return
// to fit the x86 JIT calling convention. Thus fcalls returning small types have to
// use the FC_XXX_RET types to force the C/C++ compiler to do the widening.
//
// The most common small return type of FCALLs is bool. The widening of bool is
// especially tricky since the value has to be also normalized. FC_BOOL_RET and
// FC_RETURN_BOOL macros are provided to make it fool-proof. FCALLs returning bool
// should be implemented using the following pattern:

//  FCIMPL0(FC_BOOL_RET, Foo)       // the return type should be FC_BOOL_RET
//      BOOL ret;
//
//      FC_RETURN_BOOL(ret);        // return statements should be FC_RETURN_BOOL
//  FCIMPLEND

// These rules are verified in binder.cpp if COMPlus_ConsistencyCheck is set.

#ifdef _PREFAST_

// Use prefast build to ensure that functions returning FC_BOOL_RET
// are using FC_RETURN_BOOL to return it. Missing FC_RETURN_BOOL will
// result in a type mismatch error in prefast builds. This will also
// catch misuses of FC_BOOL_RET for other places (e.g. in FCALL parameters).
typedef LPVOID FC_BOOL_RET;
#define FC_RETURN_BOOL(x) do { return (LPVOID)!!(x); } while(0)

#else

#if defined(TARGET_X86) || defined(TARGET_AMD64)
// The return value is artificially widened on x86 and amd64
typedef INT32 FC_BOOL_RET;
#else
typedef CLR_BOOL FC_BOOL_RET;
#endif

#define FC_RETURN_BOOL(x)   do { return !!(x); } while(0)

#endif

#if defined(TARGET_X86) || defined(TARGET_AMD64)
// The return value is artificially widened on x86 and amd64
typedef UINT32 FC_CHAR_RET;
typedef INT32 FC_INT8_RET;
typedef UINT32 FC_UINT8_RET;
typedef INT32 FC_INT16_RET;
typedef UINT32 FC_UINT16_RET;
#else
typedef CLR_CHAR FC_CHAR_RET;
typedef INT8 FC_INT8_RET;
typedef UINT8 FC_UINT8_RET;
typedef INT16 FC_INT16_RET;
typedef UINT16 FC_UINT16_RET;
#endif

// FC_TypedByRef should be used for TypedReferences in FCall signatures
#define FC_TypedByRef   TypedByRef
#define FC_DECIMAL      DECIMAL

// The fcall entrypoints have to be at unique addresses. Use this helper macro to make
// the code of the fcalls unique if you get an assert in ecall.cpp that mentions it.
// The parameter of the FCUnique macro is an arbitrary 32-bit random non-zero number.
#define FCUnique(unique) { Volatile<int> u = (unique); while (u.LoadWithoutBarrier() == 0) { }; }

// FCALL contracts come in two forms:
//
// Short form that should be used if the FCALL contract does not have any extras like preconditions, failure injection. Example:
//
// FCIMPL0(void, foo)
// {
//     FCALL_CONTRACT;
//     ...
//
// Long form that should be used otherwise. Example:
//
// FCIMPL1(void, foo, void *p)
// {
//     CONTRACTL {
//         FCALL_CHECK;
//         PRECONDITION(CheckPointer(p));
//     } CONTRACTL_END;
//     ...

//
// FCALL_CHECK defines the actual contract conditions required for FCALLs
//
#define FCALL_CHECK \
    THROWS; \
    DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \
    MODE_COOPERATIVE;

//
// FCALL_CONTRACT should be the following shortcut:
//
// #define FCALL_CONTRACT   CONTRACTL { FCALL_CHECK; } CONTRACTL_END;
//
// Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as a static contract only for performance reasons.
//
#define FCALL_CONTRACT \
    STATIC_CONTRACT_THROWS; \
    /* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \
    STATIC_CONTRACT_GC_NOTRIGGER; \
    STATIC_CONTRACT_MODE_COOPERATIVE

#endif //__FCall_h__
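// ---------------------------------------------------------------------------
// [Editor's note] The fragment below is an illustrative sketch, not part of
// fcall.h. It shows how the pieces documented above typically combine in one
// FCall: the FCIMPL/FCIMPLEND bracketing, unsafe Object* parameters converted
// to OBJECTREFs, an early FCThrow before any frame exists, a helper method
// frame protecting the references, and the widened FC_BOOL_RET return. The
// method name "EqualsHelper" and its slow-path body are hypothetical; the
// macros are the real ones declared in this header.
//
//   FCIMPL2(FC_BOOL_RET, EqualsHelper, Object* pThisUNSAFE, Object* pOtherUNSAFE)
//   {
//       FCALL_CONTRACT;                       // short form; no extra conditions
//
//       if (pThisUNSAFE == NULL)
//           FCThrow(kNullReferenceException); // sets up its own frame, "returns" 0
//
//       OBJECTREF refThis  = ObjectToOBJECTREF(pThisUNSAFE);
//       OBJECTREF refOther = ObjectToOBJECTREF(pOtherUNSAFE);
//       BOOL result = (refThis == refOther);  // trivial fast path, no GC possible
//
//       if (!result)
//       {
//           // The slow path may trigger GC, so erect a frame that protects
//           // both references and polls for GC.
//           HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, refOther);
//           result = FALSE;                   // placeholder for real slow-path work
//           HELPER_METHOD_FRAME_END();
//       }
//
//       FC_RETURN_BOOL(result);               // widen + normalize the bool return
//   }
//   FCIMPLEND
// ---------------------------------------------------------------------------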
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// FCall.H
//
//
// FCall is a high-performance alternative to ECall. Unlike ECall, FCall
// methods do not necessarily create a frame. Jitted code calls directly
// to the FCall entry point. It is possible to do operations that need
// to have a frame within an FCall; you need to manually set up the frame
// before you do such operations.

// It is illegal to cause a GC or EH to happen in an FCALL before setting
// up a frame. To prevent accidentally violating this rule, FCALLs turn
// on BEGINGCFORBID, which ensures that these things can't happen in a
// checked build without causing an ASSERTE. Once you set up a frame,
// this state is turned off as long as the frame is active, and then is
// turned on again when the frame is torn down. This mechanism should
// be sufficient to ensure that the rules are followed.

// In general you set up a frame by using the following macros

//      HELPER_METHOD_FRAME_BEGIN_RET*()    // Use if the FCALL has a return value
//      HELPER_METHOD_FRAME_BEGIN*()        // Use if the FCALL does not return a value
//      HELPER_METHOD_FRAME_END*()

// These macros introduce a scope which is protected by a HelperMethodFrame.
// In this scope you can do EH or GC. There are rules associated with
// their use. In particular

//      1) These macros can only be used in the body of an FCALL (that is,
//         something using the FCIMPL* or HCIMPL* macros for its declaration).
//      2) You may not perform a 'return' within this scope.

// Compile time errors occur if you try to violate either of these rules.

// The frame that is set up does NOT protect any GC variables (in particular the
// arguments of the FCALL). Thus you need to do an explicit GCPROTECT once the
// frame is established if you need to protect an argument. There are flavors
// of HELPER_METHOD_FRAME that protect a certain number of GC variables. For
// example

//      HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2)

// will protect the GC variables arg1 and arg2, as well as erecting the frame.

// Another invariant that you must be aware of is the need to poll to see if
// a GC is needed by some other thread. Unless the FCALL is VERY short,
// every code path through the FCALL must do such a poll. The important
// thing here is that a poll will cause a GC, and thus you can only do it
// when all your GC variables are protected. To make things easier,
// HELPER_METHOD_FRAMEs that protect things automatically do this poll.
// If you don't need to protect anything, HELPER_METHOD_FRAME_BEGIN_0
// will also do the poll.

// Sometimes it is convenient to do the poll at the end of the frame; you
// can use HELPER_METHOD_FRAME_BEGIN_NOPOLL and HELPER_METHOD_FRAME_END_POLL
// to do the poll at the end. If somewhere in the middle is the best
// place, you can do that too with HELPER_METHOD_POLL()

// You don't need to erect a helper method frame to do a poll. FC_GC_POLL
// can do this (remember all your GC refs will be trashed).

// Finally, if your method is VERY small, you can get away without a poll;
// you have to use FC_GC_POLL_NOT_NEEDED to mark this.
// Use sparingly!

// It is possible to set up the frame as the first operation in the FCALL and
// tear it down as the last operation before returning. This works and is
// reasonably efficient (as good as an ECall); however, if it is the case that
// you can defer the setup of the frame to an unlikely code path (exception path),
// that is much better.
// If you defer setup of the frame, all codepaths leading to the frame setup
// must be wrapped with PERMIT_HELPER_METHOD_FRAME_BEGIN/END. These block
// certain compiler optimizations that interfere with the delayed frame setup.
// These macros are automatically included in the HCIMPL, FCIMPL, and frame
// setup macros.

// <TODO>TODO: we should have a way of doing a trial allocation (an allocation that
// will fail if it would cause a GC). That way even FCALLs that need to allocate
// would not necessarily need to set up a frame. </TODO>

// It is common to only need to set up a frame in order to throw an exception.
// While this can be done by doing

//      HELPER_METHOD_FRAME_BEGIN()         // Use if the FCALL does not return a value
//      COMPlusThrow(except);
//      HELPER_METHOD_FRAME_END()

// it is more efficient (in space) to use the convenience macro FCThrow, which
// does this for you (sets up a frame and does the throw):

//      FCThrow(except)

// Since FCALLs have to conform to the EE calling conventions and not to C
// calling conventions, FCALLs need to be declared using special macros (FCIMPL*)
// that implement the correct calling conventions. There are variants of these
// macros depending on the number of args, and sometimes the types of the
// arguments.

//------------------------------------------------------------------------
// A very simple example:
//
// FCIMPL2(INT32, Div, INT32 x, INT32 y)
// {
//     if (y == 0)
//         FCThrow(kDivideByZeroException);
//     return x/y;
// }
// FCIMPLEND
//
//
// *** WATCH OUT FOR THESE GOTCHAS: ***
// ------------------------------------
//  - In your FCDECL & FCIMPL protos, don't declare a param as type OBJECTREF
//    or any of its derived types. This will break on the checked build because
//    __fastcall doesn't enregister C++ objects (which OBJECTREF is).
//    Instead, you need to do something like:
//
//      FCIMPL(.., .., Object* pObject0)
//          OBJECTREF pObject = ObjectToOBJECTREF(pObject0);
//      FCIMPLEND
//
//    For similar reasons, use Object* rather than OBJECTREF as a return type.
//    Consider either using ObjectToOBJECTREF or calling VALIDATEOBJECTREF
//    to make sure your Object* is valid.
//
//  - FCThrow() must be called directly from your FCall impl function: it
//    cannot be called from a subfunction. Calling from a subfunction breaks
//    the VC code parsing workaround that lets us recover the callee saved registers.
//    Fortunately, you'll get a compile error complaining about an
//    unknown variable "__me".
//
//  - If your FCall returns VOID, you must use FCThrowVoid() rather than
//    FCThrow(). This is because FCThrow() has to generate an unexecuted
//    "return" statement for the code parser.
//
//  - On x86, if the first and/or second argument of your FCall cannot be passed
//    in either of the __fastcall registers (ECX/EDX), you must use the "V" versions
//    of the FCDECL and FCIMPL macros to enregister arguments correctly. Some of the
//    most common types that fit this requirement are 64-bit values (i.e. INT64 or
//    UINT64) and floating-point values (i.e. FLOAT or DOUBLE). For example, FCDECL3_IVI
//    must be used for FCalls that take 3 arguments where the 2nd argument is INT64, and
//    FCDECL2_VV must be used for FCalls that take 2 arguments where both are FLOAT.
//
//  - You may use structs for protecting multiple OBJECTREF's simultaneously.
//    In these cases, you must use a variant of a helper method frame with PROTECT
//    in the name, to ensure all the OBJECTREF's in the struct get protected.
//    Also, initialize all the OBJECTREF's first. Like this:
//
//    FCIMPL4(Object*, COMNlsInfo::nativeChangeCaseString, LocaleIDObject* localeUNSAFE,
//            INT_PTR pNativeTextInfo, StringObject* pStringUNSAFE, CLR_BOOL bIsToUpper)
//    {
//      [ignoring CONTRACT for now]
//      struct _gc
//      {
//          STRINGREF pResult;
//          STRINGREF pString;
//          LOCALEIDREF pLocale;
//      } gc;
//      gc.pResult = NULL;
//      gc.pString = ObjectToSTRINGREF(pStringUNSAFE);
//      gc.pLocale = (LOCALEIDREF)ObjectToOBJECTREF(localeUNSAFE);
//
//      HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc)
//
// If you forgot the PROTECT part, the macro will only protect the first OBJECTREF,
// introducing a subtle GC hole in your code. Fortunately, we now issue a
// compile-time error if you forget.

// How FCall works:
// ----------------
// An FCall target uses __fastcall or some other calling convention to
// match the IL calling convention exactly. Thus, a call to an FCall is a direct
// call to the target w/ no intervening stub or frame.
//
// The tricky part is when FCThrow is called. FCThrow must generate
// a proper method frame before allocating and throwing the exception.
// To do this, it must recover several things:
//
//      - The location of the FCIMPL's return address (since that's
//        where the frame will be based.)
//
//      - The on-entry values of the callee-saved regs, which must
//        be recorded in the frame so that GC can update them.
//        Depending on how VC compiles your FCIMPL, those values are still
//        in the original registers or saved on the stack.
//
//        To figure out which, FCThrow() generates the code:
//
//              while (NULL == __FCThrow(__me, ...)) {};
//              return 0;
//
//        The "return" statement will never execute, but its presence guarantees
//        that VC will follow the __FCThrow() call with a VC epilog
//        that restores the callee-saved registers using a pretty small
//        and predictable set of Intel opcodes. __FCThrow() parses this
//        epilog and simulates its execution to recover the callee saved
//        registers.
//
//        The while loop is to prevent the compiler from doing tail call optimizations.
//        The helper frame interpreter needs the frame to be present.
//
//      - The MethodDesc* that this FCall implements. This MethodDesc*
//        is part of the frame and ensures that the FCall will appear
//        in the exception's stack trace. To get this, FCDECL declares
//        a static local __me, initialized to point to the FC target itself.
//        This address is exactly what's stored in the ECall lookup tables;
//        so __FCThrow() simply does a reverse lookup on that table to recover
//        the MethodDesc*.
//

#ifndef __FCall_h__
#define __FCall_h__

#include "gms.h"
#include "runtimeexceptionkind.h"
#include "debugreturn.h"

//==============================================================================================
// These macros defeat compiler optimizations that might mix nonvolatile
// register loads and stores with other code in the function body. This
// creates problems for the frame setup code, which assumes that any
// nonvolatiles that are saved at the point of the frame setup will be
// re-loaded when the frame is popped.
//
// Currently this is only known to be an issue on AMD64. It's uncertain
// whether it is an issue on x86.
//==============================================================================================

#if defined(TARGET_AMD64) && !defined(TARGET_UNIX)

//
// On AMD64 this is accomplished by including a setjmp anywhere in a function.
// Doesn't matter whether it is reachable or not, and in fact in optimized
// builds the setjmp is removed altogether.
//
#include <setjmp.h>

//
// Use of setjmp is temporary, we will eventually have compiler intrinsics to
// disable the optimizations. Besides, we don't actually execute setjmp in
// these macros (or anywhere else in the VM on AMD64).
//

#pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable

#ifdef _DEBUG
//
// Linked list of unmanaged methods preceding a HelperMethodFrame push. This
// is linked onto the current Thread. Each list entry is stack-allocated so it
// can be associated with an unmanaged frame. Each unmanaged frame needs to be
// associated with at least one list entry.
//
struct HelperMethodFrameCallerList
{
    HelperMethodFrameCallerList *pCaller;
};
#endif // _DEBUG

//
// Resets the Thread state at a new managed -> fcall transition.
//
class FCallTransitionState
{
public:

    FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
    ~FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });

#ifdef _DEBUG
private:
    Thread *m_pThread;
    HelperMethodFrameCallerList *m_pPreviousHelperMethodFrameCallerList;
#endif // _DEBUG
};

//
// Pushes/pops state for each caller.
//
class PermitHelperMethodFrameState
{
public:

    PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
    ~PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });

    static VOID CheckHelperMethodFramePermitted () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });

#ifdef _DEBUG
private:
    Thread *m_pThread;
    HelperMethodFrameCallerList m_ListEntry;
#endif // _DEBUG
};

//
// Resets the Thread state after the HelperMethodFrame is pushed. At this
// point, the HelperMethodFrame is capable of unwinding to the managed code,
// so we can reset the Thread state for any nested fcalls.
//
class CompletedFCallTransitionState
{
public:

    CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
    ~CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });

#ifdef _DEBUG
private:

    HelperMethodFrameCallerList *m_pLastHelperMethodFrameCallerList;
#endif // _DEBUG
};

#define PERMIT_HELPER_METHOD_FRAME_BEGIN() \
        if (1) \
        { \
            PermitHelperMethodFrameState ___PermitHelperMethodFrameState;

#define PERMIT_HELPER_METHOD_FRAME_END() \
        } \
        else \
        { \
            jmp_buf ___jmpbuf; \
            setjmp(___jmpbuf); \
            __assume(0); \
        }

#define FCALL_TRANSITION_BEGIN() \
        FCallTransitionState ___FCallTransitionState; \
        PERMIT_HELPER_METHOD_FRAME_BEGIN();

#define FCALL_TRANSITION_END() \
        PERMIT_HELPER_METHOD_FRAME_END();

#define CHECK_HELPER_METHOD_FRAME_PERMITTED() \
        PermitHelperMethodFrameState::CheckHelperMethodFramePermitted(); \
        CompletedFCallTransitionState ___CompletedFCallTransitionState;

#else // unsupported processor

#define PERMIT_HELPER_METHOD_FRAME_BEGIN()
#define PERMIT_HELPER_METHOD_FRAME_END()
#define FCALL_TRANSITION_BEGIN()
#define FCALL_TRANSITION_END()
#define CHECK_HELPER_METHOD_FRAME_PERMITTED()

#endif // unsupported processor

//==============================================================================================
// This is where FCThrow ultimately ends up. Never call this directly.
// Use the FCThrow() macros. __FCThrowArgument is the helper to throw ArgumentExceptions
// with a resource taken from the managed resource manager.
//============================================================================================== LPVOID __FCThrow(LPVOID me, enum RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3); LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR argumentName, LPCWSTR resourceName); //============================================================================================== // FCDECLn: A set of macros for generating header declarations for FC targets. // Use FCIMPLn for the actual body. //============================================================================================== // Note: on x86, these defs reverse all but the first two arguments // (the IL stack calling convention is reversed from __fastcall). // Calling convention for varargs #define F_CALL_VA_CONV __cdecl #ifdef TARGET_X86 // Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention #ifdef __GNUC__ #define F_CALL_CONV __attribute__((cdecl, regparm(3))) // The GCC FCALL convention (simulated via cdecl, regparm(3)) is different from the MSVC FCALL convention. GCC can use up // to 3 registers to store parameters. The registers used are EAX, EDX, ECX. Dummy parameters and reordering // of the actual parameters in the FCALL signature are used to make the calling convention look like MSVC's. #define SWIZZLE_REGARG_ORDER #else // __GNUC__ #define F_CALL_CONV __fastcall #endif // !__GNUC__ #define SWIZZLE_STKARG_ORDER #else // TARGET_X86 // // non-x86 platforms don't have messed-up calling convention swizzling // #define F_CALL_CONV #endif // !TARGET_X86 #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...)
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) #else // SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a3, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a1, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) #endif // !SWIZZLE_REGARG_ORDER #if 0 // // don't use something like this... 
directly calling an FCALL from within the runtime breaks stackwalking because // the FCALL reverse mapping only gets established in ECall::GetFCallImpl and that codepath is circumvented by // directly calling an FCALL. // See below for usage of FC_CALL_INNER (used in SecurityStackWalk::Check presently) // #define FCCALL0(funcname) funcname() #define FCCALL1(funcname, a1) funcname(a1) #define FCCALL2(funcname, a1, a2) funcname(a1, a2) #define FCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define FCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define FCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define FCCALL6(funcname, a1, a2, a3, a4, a5, a6) funcname(a1, a2, a6, a5, a4, a3) #define FCCALL7(funcname, a1, a2, a3, a4, a5, a6, a7) funcname(a1, a2, a7, a6, a5, a4, a3) #define FCCALL8(funcname, a1, a2, a3, a4, a5, a6, a7, a8) funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCCALL9(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCCALL10(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL11(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL12(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #endif // 0 #else // !SWIZZLE_STKARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2, ...)
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #endif // !SWIZZLE_STKARG_ORDER #define HELPER_FRAME_DECL(x) FrameWithCookie<HelperMethodFrame_##x##OBJ> __helperframe // use the capture state machinery if the architecture has one // // For a normal build we create a loop (see explanation on RestoreState below) // We don't want a loop here for PREFAST since that causes // warning 263: Using _alloca in a loop // And we can't use DEBUG_OK_TO_RETURN for PREFAST because the PREFAST version // requires that you already be in a DEBUG_ASSURE_NO_RETURN_BEGIN scope #define HelperMethodFrame_0OBJ HelperMethodFrame #define HELPER_FRAME_ARGS(attribs) __me, attribs #define FORLAZYMACHSTATE(x) x #if defined(_PREFAST_) #define FORLAZYMACHSTATE_BEGINLOOP(x) x #define FORLAZYMACHSTATE_ENDLOOP(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END #else #define FORLAZYMACHSTATE_BEGINLOOP(x) x do #define 
FORLAZYMACHSTATE_ENDLOOP(x) while(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN DEBUG_OK_TO_RETURN_BEGIN(LAZYMACHSTATE) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END DEBUG_OK_TO_RETURN_END(LAZYMACHSTATE) #endif // BEGIN: before gcpoll //FCallGCCanTriggerNoDtor __fcallGcCanTrigger; //__fcallGcCanTrigger.Enter(); // END: after gcpoll //__fcallGcCanTrigger.Leave(__FUNCTION__, __FILE__, __LINE__); // We have to put DEBUG_OK_TO_RETURN_BEGIN around the FORLAZYMACHSTATE // to allow the HELPER_FRAME to be installed inside an SO_INTOLERANT region // which does not allow a return. The return is used by FORLAZYMACHSTATE // to capture the state, but is not an actual return, so it is ok. #define HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ FORLAZYMACHSTATE_BEGINLOOP(int alwaysZero = 0;) \ { \ INDEBUG(static BOOL __haveCheckedRestoreState = FALSE;) \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); \ CHECK_HELPER_METHOD_FRAME_PERMITTED(); \ helperFrame; \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN; \ FORLAZYMACHSTATE(CAPTURE_STATE(__helperframe.MachineState(), ret);) \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END; \ INDEBUG(__helperframe.SetAddrOfHaveCheckedRestoreState(&__haveCheckedRestoreState)); \ DEBUG_ASSURE_NO_RETURN_BEGIN(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Enter()); #define HELPER_METHOD_FRAME_BEGIN_EX(ret, helperFrame, gcpoll, allowGC) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ \ INSTALL_MANAGED_EXCEPTION_DISPATCHER; \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe); #define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ // The while(__helperframe.RestoreState()) loop needs a bit of explanation. // The issue is ensuring that the same machine state (which registers are saved) // exists when the machine state is probed (when the frame is created) and // when it is actually used (when the frame is popped). We do this by creating // a flow of control from use to def. Note that 'RestoreState' always returns false: // we never actually loop, but the compiler does not know that, and thus // will be forced to keep the state of register spills the same at // the two locations. #define HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC) \ /* <TODO>TODO TURN THIS ON!!! 
</TODO> */ \ /* gcpoll; */ \ DEBUG_ASSURE_NO_RETURN_END(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)); \ FORLAZYMACHSTATE(alwaysZero = \ HelperMethodFrameRestoreState(INDEBUG_COMMA(&__helperframe) \ __helperframe.MachineState());) \ PERMIT_HELPER_METHOD_FRAME_END() \ } FORLAZYMACHSTATE_ENDLOOP(alwaysZero); #define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \ __helperframe.Pop(); \ UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \ __helperframe.Pop(); \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_0() \ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_NOPOLL() HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_1(arg1) HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_2(arg1, arg2) HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(attribs, arg1, arg2, arg3) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg3) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(3)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2, (OBJECTREF*) &arg3), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_3(arg1, arg2, arg3) HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(Frame::FRAME_ATTR_NONE, arg1, arg2, arg3) #define HELPER_METHOD_FRAME_BEGIN_PROTECT(gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(attribs) \ 
HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_0() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_0() \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1(probeFailExpr, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NO_THREAD_ABORT), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(), TRUE, probeFailExpr) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(attribs, gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define 
HELPER_METHOD_FRAME_BEGIN_RET_1(arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_1(arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_2(arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2) #define HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_NONE, gc) #define HELPER_METHOD_FRAME_END() HELPER_METHOD_FRAME_END_EX({},FALSE) #define HELPER_METHOD_FRAME_END_POLL() HELPER_METHOD_FRAME_END_EX(HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_END_NOTHROW() HELPER_METHOD_FRAME_END_EX_NOTHROW({},FALSE) // This is the fastest way to do a GC poll if you have already erected a HelperMethodFrame #define HELPER_METHOD_POLL() { __helperframe.Poll(); INCONTRACT(__fCallCheck.SetDidPoll()); } // The HelperMethodFrame knows how to get its return address. Let other code get at it, too. // (Uses comma operator to call InsureInit & discard result.) #define HELPER_METHOD_FRAME_GET_RETURN_ADDRESS() \ ( static_cast<UINT_PTR>( (__helperframe.InsureInit(false, NULL)), (__helperframe.MachineState()->GetRetAddr()) ) ) // Very short routines, or routines that are guaranteed to force GC or EH // don't need to poll the GC. USE VERY SPARINGLY!!! 
#define FC_GC_POLL_NOT_NEEDED() INCONTRACT(__fCallCheck.SetNotNeeded()) Object* FC_GCPoll(void* me, Object* objToProtect = NULL); #define FC_GC_POLL_EX(ret) \ { \ INCONTRACT(Thread::TriggersGC(GetThread());) \ INCONTRACT(__fCallCheck.SetDidPoll();) \ if (g_TrapReturningThreads.LoadWithoutBarrier()) \ { \ if (FC_GCPoll(__me)) \ return ret; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \ } \ } #define FC_GC_POLL() FC_GC_POLL_EX(;) #define FC_GC_POLL_RET() FC_GC_POLL_EX(0) #define FC_GC_POLL_AND_RETURN_OBJREF(obj) \ { \ INCONTRACT(__fCallCheck.SetDidPoll();) \ Object* __temp = OBJECTREFToObject(obj); \ if (g_TrapReturningThreads.LoadWithoutBarrier()) \ { \ __temp = FC_GCPoll(__me, __temp); \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \ } \ return __temp; \ } #if defined(ENABLE_CONTRACTS) #define FC_CAN_TRIGGER_GC() FCallGCCanTrigger::Enter() #define FC_CAN_TRIGGER_GC_END() FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__) #define FC_CAN_TRIGGER_GC_HAVE_THREAD(thread) FCallGCCanTrigger::Enter(thread) #define FC_CAN_TRIGGER_GC_HAVE_THREADEND(thread) FCallGCCanTrigger::Leave(thread, __FUNCTION__, __FILE__, __LINE__) // turns on forbidGC for the lifetime of the instance class ForbidGC { protected: Thread *m_pThread; public: ForbidGC(const char *szFile, int lineNum); ~ForbidGC(); }; // this little helper class checks to make certain // 1) ForbidGC is set throughout the routine. // 2) Sometime during the routine, a GC poll is done class FCallCheck : public ForbidGC { public: FCallCheck(const char *szFile, int lineNum); ~FCallCheck(); void SetDidPoll() {LIMITED_METHOD_CONTRACT; didGCPoll = true; } void SetNotNeeded() {LIMITED_METHOD_CONTRACT; notNeeded = true; } private: #ifdef _DEBUG DWORD unbreakableLockCount; #endif bool didGCPoll; // GC poll was done bool notNeeded; // GC poll not needed unsigned __int64 startTicks; // tick count at beginning of FCall }; // FC_COMMON_PROLOG is used for both FCalls and HCalls #define FC_COMMON_PROLOG(target, assertFn) \ /* The following line has to be first. We do not want to trash last error */ \ DWORD __lastError = ::GetLastError(); \ static void* __cache = 0; \ assertFn(__cache, (LPVOID)target); \ { \ Thread *_pThread = GetThread(); \ Thread::ObjectRefFlush(_pThread); \ } \ FCallCheck __fCallCheck(__FILE__, __LINE__); \ FCALL_TRANSITION_BEGIN(); \ ::SetLastError(__lastError); \ void FCallAssert(void*& cache, void* target); void HCallAssert(void*& cache, void* target); #else #define FC_COMMON_PROLOG(target, assertFn) FCALL_TRANSITION_BEGIN() #define FC_CAN_TRIGGER_GC() #define FC_CAN_TRIGGER_GC_END() #endif // ENABLE_CONTRACTS // #FC_INNER // Macros that allow an fcall to be split into two functions to avoid the helper frame overhead on common fast // codepaths. // // The helper routine needs to know the name of the routine that called it so that it can look up the name of // the managed routine this code is associated with (for managed stack traces). This is passed with the // FC_INNER_PROLOG macro. // // The helper can set up a HELPER_METHOD_FRAME, but should pass the // Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2 which indicates the exact number of // unwinds to do to get back to managed code. Currently we only support depth 2 which means that the // HELPER_METHOD_FRAME needs to be set up in the function directly called by the FCALL. 
The helper should // use the NOINLINE macro to prevent the compiler from inlining it into the FCALL (which would obviously // mess up the unwind count). // // The other invariant that needs to hold is that the epilog walker needs to be able to get from the call to // the helper routine to the end of the FCALL using trivial heuristics. The easiest (and only supported) // way of doing this is to place your helper right before a return (e.g. at the end of the method). Generally // this is not a problem at all, since the FCALL itself will pick off some common case and then tail-call to // the helper for everything else. You must use the code:FC_INNER_RETURN macros to do the call, to ensure // that the C++ compiler does not tail-call optimize the call to the inner function and mess up the stack // depth. // // see code:ObjectNative::GetClass for an example // #define FC_INNER_PROLOG(outerfuncname) \ LPVOID __me; \ __me = GetEEFuncEntryPointMacro(outerfuncname); \ FC_CAN_TRIGGER_GC(); \ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__)); // This variant should be used for inner fcall functions that have the // __me value passed as an argument to the function. This allows // inner functions to be shared across multiple fcalls. #define FC_INNER_PROLOG_NO_ME_SETUP() \ FC_CAN_TRIGGER_GC(); \ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__)); #define FC_INNER_EPILOG() \ FC_CAN_TRIGGER_GC_END(); // If you are using FC_INNER, and you are tail calling to the helper method (a common case), then you need // to use the FC_INNER_RETURN macros (there is one for methods that return a value and another if the // function returns void). Their purpose is to inhibit any tail call optimization the C++ compiler // might do, which would otherwise confuse the epilog walker. // // * See #FC_INNER for more extern RAW_KEYWORD(volatile) int FC_NO_TAILCALL; #define FC_INNER_RETURN(type, expr) \ type __retVal = expr; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \ return(__retVal); #define FC_INNER_RETURN_VOID(stmt) \ stmt; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \ return; //============================================================================================== // FCIMPLn: A set of macros for generating the proto for the actual // implementation (use FCDECLn for header protos.) // // The hidden "__me" variable lets us recover the original MethodDesc* // so any thrown exceptions will have the correct stack trace. FCThrow() // passes this along to __FCThrowInternal(). //============================================================================================== #define GetEEFuncEntryPointMacro(func) ((LPVOID)(func)) #define FCIMPL_PROLOG(funcname) \ LPVOID __me; \ __me = GetEEFuncEntryPointMacro(funcname); \ FC_COMMON_PROLOG(__me, FCallAssert) #if defined(_DEBUG) && !defined(__GNUC__) // Build the list of all fcall signatures. It is used in binder.cpp to verify // compatibility of managed and unmanaged fcall signatures. The check is currently done // for x86 only. 
#define CHECK_FCALL_SIGNATURE #endif #ifdef CHECK_FCALL_SIGNATURE struct FCSigCheck { public: FCSigCheck(void* fnc, const char* sig) { LIMITED_METHOD_CONTRACT; func = fnc; signature = sig; next = g_pFCSigCheck; g_pFCSigCheck = this; } FCSigCheck* next; void* func; const char* signature; static FCSigCheck* g_pFCSigCheck; }; #define FCSIGCHECK(funcname, signature) \ static FCSigCheck UNIQUE_LABEL(FCSigCheck)(GetEEFuncEntryPointMacro(funcname), signature); #else // CHECK_FCALL_SIGNATURE #define FCSIGCHECK(funcname, signature) #endif // !CHECK_FCALL_SIGNATURE #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype 
F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) { FCIMPL_PROLOG(funcname) #else // SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) FCSIGCHECK(funcname, #rettype) \ rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," "V" #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "...") \ rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a3, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4) \ rettype F_CALL_CONV funcname(a1, a2, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6) \ rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7) \ rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8) \ rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9) \ rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10) \ rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 
"," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11) \ rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12) \ rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13) \ rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13 "," #a14) \ rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) { FCIMPL_PROLOG(funcname) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define FCIMPL0(rettype, funcname) rettype funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #endif // !SWIZZLE_STKARG_ORDER //============================================================================================== // Use this to terminate an FCIMPL. //============================================================================================== #define FCIMPL_EPILOG() FCALL_TRANSITION_END() #define FCIMPLEND FCIMPL_EPILOG(); } #define HCIMPL_PROLOG(funcname) LPVOID __me; __me = 0; FC_COMMON_PROLOG(funcname, HCallAssert) // HCIMPL macros are just like their FCIMPL counterparts, however // they do not remember the function they come from. 
Thus they will not // show up in a stack trace. This is what you want for JIT helpers and the like #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(0, 0, a1) #define HCCALL1_V(funcname, a1) funcname(0, 0, 0, a1) #define HCCALL2(funcname, a1, a2) funcname(0, a2, a1) #define HCCALL3(funcname, a1, a2, a3) funcname(0, a2, a1, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(0, a2, a1, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(0, a2, a1, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(int /* EAX */, int /* EDX */, a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(int /* EAX */, a2, a1) #else // SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define 
HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a3, a4) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a3, a4, a5) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_STKARG_ORDER #define HCIMPLEND_RAW } #define HCIMPLEND FCALL_TRANSITION_END(); } //============================================================================================== // Throws an exception from an FCall. See rexcep.h for a list of valid // exception codes. //============================================================================================== #define FCThrow(reKind) FCThrowEx(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowEx(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return 0; \ } //============================================================================================== // Like FCThrow but can be used for a VOID-returning FCall. The only // difference is in the "return" statement. 
//============================================================================================== #define FCThrowVoid(reKind) FCThrowExVoid(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowExVoid(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return; \ } // Use FCThrowRes to throw an exception with a localized error message from the // ResourceManager in managed code. #define FCThrowRes(reKind, resourceName) FCThrowArgumentEx(reKind, NULL, resourceName) #define FCThrowArgumentNull(argName) FCThrowArgumentEx(kArgumentNullException, argName, NULL) #define FCThrowArgumentOutOfRange(argName, message) FCThrowArgumentEx(kArgumentOutOfRangeException, argName, message) #define FCThrowArgument(argName, message) FCThrowArgumentEx(kArgumentException, argName, message) #define FCThrowArgumentEx(reKind, argName, resourceName) \ { \ while (NULL == \ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \ return 0; \ } // Use FCThrowRes to throw an exception with a localized error message from the // ResourceManager in managed code. #define FCThrowResVoid(reKind, resourceName) FCThrowArgumentVoidEx(reKind, NULL, resourceName) #define FCThrowArgumentNullVoid(argName) FCThrowArgumentVoidEx(kArgumentNullException, argName, NULL) #define FCThrowArgumentOutOfRangeVoid(argName, message) FCThrowArgumentVoidEx(kArgumentOutOfRangeException, argName, message) #define FCThrowArgumentVoid(argName, message) FCThrowArgumentVoidEx(kArgumentException, argName, message) #define FCThrowArgumentVoidEx(reKind, argName, resourceName) \ { \ while (NULL == \ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \ return; \ } // The x86 JIT calling convention expects returned small types (e.g. bool) to be // widened on return. The C/C++ calling convention does not guarantee returned // small types to be widened. Small types have to be artificially widened on return // to fit the x86 JIT calling convention. Thus fcalls returning small types have to // use the FC_XXX_RET types to force the C/C++ compiler to do the widening. // // The most common small return type of FCALLs is bool. The widening of bool is // especially tricky since the value also has to be normalized. FC_BOOL_RET and // FC_RETURN_BOOL macros are provided to make it fool-proof. FCALLs returning bool // should be implemented using the following pattern: // FCIMPL0(FC_BOOL_RET, Foo) // the return type should be FC_BOOL_RET // BOOL ret; // // FC_RETURN_BOOL(ret); // return statements should be FC_RETURN_BOOL // FCIMPLEND // These rules are verified in binder.cpp if COMPlus_ConsistencyCheck is set. #ifdef _PREFAST_ // Use prefast build to ensure that functions returning FC_BOOL_RET // are using FC_RETURN_BOOL to return it. Missing FC_RETURN_BOOL will // result in a type mismatch error in prefast builds. This will also // catch misuses of FC_BOOL_RET in other places (e.g. in FCALL parameters). 
typedef LPVOID FC_BOOL_RET; #define FC_RETURN_BOOL(x) do { return (LPVOID)!!(x); } while(0) #else #if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artificially widened on x86 and amd64 typedef INT32 FC_BOOL_RET; #else typedef CLR_BOOL FC_BOOL_RET; #endif #define FC_RETURN_BOOL(x) do { return !!(x); } while(0) #endif #if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artificially widened on x86 and amd64 typedef UINT32 FC_CHAR_RET; typedef INT32 FC_INT8_RET; typedef UINT32 FC_UINT8_RET; typedef INT32 FC_INT16_RET; typedef UINT32 FC_UINT16_RET; #else typedef CLR_CHAR FC_CHAR_RET; typedef INT8 FC_INT8_RET; typedef UINT8 FC_UINT8_RET; typedef INT16 FC_INT16_RET; typedef UINT16 FC_UINT16_RET; #endif // FC_TypedByRef should be used for TypedReferences in FCall signatures #define FC_TypedByRef TypedByRef #define FC_DECIMAL DECIMAL // The fcall entrypoints have to be at unique addresses. Use this helper macro to make // the code of the fcalls unique if you get an assert in ecall.cpp that mentions it. // The parameter of the FCUnique macro is an arbitrary 32-bit random non-zero number. #define FCUnique(unique) { Volatile<int> u = (unique); while (u.LoadWithoutBarrier() == 0) { }; } // FCALL contracts come in two forms: // // Short form that should be used if the FCALL contract does not have any extras like preconditions, failure injection. Example: // // FCIMPL0(void, foo) // { // FCALL_CONTRACT; // ... // // Long form that should be used otherwise. Example: // // FCIMPL1(void, foo, void *p) // { // CONTRACTL { // FCALL_CHECK; // PRECONDITION(CheckPointer(p)); // } CONTRACTL_END; // ... // // FCALL_CHECK defines the actual contract conditions required for FCALLs // #define FCALL_CHECK \ THROWS; \ DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \ MODE_COOPERATIVE; // // FCALL_CONTRACT should be the following shortcut: // // #define FCALL_CONTRACT CONTRACTL { FCALL_CHECK; } CONTRACTL_END; // // Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as a static contract only for performance reasons. // #define FCALL_CONTRACT \ STATIC_CONTRACT_THROWS; \ /* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \ STATIC_CONTRACT_GC_NOTRIGGER; \ STATIC_CONTRACT_MODE_COOPERATIVE #endif //__FCall_h__
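For illustration, a minimal sketch of the bool-returning FCall pattern documented above. It assumes the fcall.h machinery shown here plus the usual ecall.cpp registration; the method name Str_IsEmpty and its StringObject* argument are hypothetical, chosen only to make the pattern concrete.

// Hedged sketch only -- not an actual runtime method.
FCIMPL1(FC_BOOL_RET, Str_IsEmpty, StringObject* strUNSAFE)
{
    FCALL_CONTRACT;   // short form: no extra preconditions or failure injection

    // NULL or zero-length counts as empty for this illustration.
    BOOL ret = (strUNSAFE == NULL) || (strUNSAFE->GetStringLength() == 0);

    FC_RETURN_BOOL(ret);   // FC_RETURN_BOOL widens and normalizes per FC_BOOL_RET
}
FCIMPLEND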
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
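A rough sketch of the kind of check this description implies; this is illustrative pseudocode with hypothetical names (insertUpperVectorSave, upperVectorInterval), not the actual register-allocator change.

// Illustrative only: bbJumpKind/BBJ_THROW are real JIT concepts, but the
// helper and surrounding shape are invented for this sketch.
bool endsWithThrow = (block->bbJumpKind == BBJ_THROW);
if (!call->IsNoReturn() && !endsWithThrow)
{
    insertUpperVectorSave(call, upperVectorInterval);   // hypothetical helper
}
// When the block throws (or the call never returns), the upper halves of the
// large vectors are dead past this point, so the save/restore pair is skipped.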
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/exception_handling/RaiseException/test3/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: test.c (exception_handling\raiseexception\test3) ** ** Purpose: Tests that the correct ExceptionCode is passed ** to the filter by RaiseException ** ** **============================================================*/ #include <palsuite.h> BOOL bFilter_RaiseException_test3 = FALSE; BOOL bTry_RaiseException_test3 = FALSE; BOOL bExcept_RaiseException_test3 = FALSE; /** ** ** Filter function that checks for the parameters ** **/ LONG Filter_test1_RaiseException_test3(EXCEPTION_POINTERS* ep, VOID* unused) { /* let the main know we've hit the filter function */ bFilter_RaiseException_test3 = TRUE; if (!bTry_RaiseException_test3) { Fail("PAL_EXCEPT_FILTER_EX: ERROR -> Something weird is going on." " The filter was hit without PAL_TRY being hit.\n"); } /* was the correct exception code passed? */ if (ep->ExceptionRecord->ExceptionCode != EXCEPTION_ARRAY_BOUNDS_EXCEEDED) { Fail("RaiseException: ERROR -> ep->ExceptionRecord->ExceptionCode" " was %x when it was expected to be %x\n", ep->ExceptionRecord->ExceptionCode, EXCEPTION_ARRAY_BOUNDS_EXCEEDED); } return EXCEPTION_EXECUTE_HANDLER; } PALTEST(exception_handling_RaiseException_test3_paltest_raiseexception_test3, "exception_handling/RaiseException/test3/paltest_raiseexception_test3") { bExcept_RaiseException_test3 = FALSE; if (0 != PAL_Initialize(argc, argv)) { return FAIL; } /******************************************************** * Test that the correct arguments are passed * to the filter by RaiseException */ PAL_TRY(VOID*, unused, NULL) { bTry_RaiseException_test3 = TRUE; /* indicate we hit the PAL_TRY block */ RaiseException(EXCEPTION_ARRAY_BOUNDS_EXCEEDED, 0, 0,NULL); Fail("RaiseException: ERROR -> code was executed after the " "exception was raised.\n"); } PAL_EXCEPT_FILTER(Filter_test1_RaiseException_test3) { if (!bTry_RaiseException_test3) { Fail("RaiseException: ERROR -> Something weird is going on." " PAL_EXCEPT_FILTER was hit without PAL_TRY being hit.\n"); } bExcept_RaiseException_test3 = TRUE; /* indicate we hit the PAL_EXCEPT_FILTER_EX block */ } PAL_ENDTRY; if (!bTry_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the " "PAL_TRY block was not executed.\n"); } if (!bExcept_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the " "PAL_EXCEPT_FILTER_EX block was not executed.\n"); } if (!bFilter_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the" " filter function was not executed.\n"); } /* did we hit all the code blocks? */ if(!bTry_RaiseException_test3 || !bExcept_RaiseException_test3 || !bFilter_RaiseException_test3) { Fail(""); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: test.c (exception_handling\raiseexception\test3) ** ** Purpose: Tests that the correct ExceptionCode is passed ** to the filter by RaiseException ** ** **============================================================*/ #include <palsuite.h> BOOL bFilter_RaiseException_test3 = FALSE; BOOL bTry_RaiseException_test3 = FALSE; BOOL bExcept_RaiseException_test3 = FALSE; /** ** ** Filter function that checks for the parameters ** **/ LONG Filter_test1_RaiseException_test3(EXCEPTION_POINTERS* ep, VOID* unused) { /* let the main know we've hit the filter function */ bFilter_RaiseException_test3 = TRUE; if (!bTry_RaiseException_test3) { Fail("PAL_EXCEPT_FILTER_EX: ERROR -> Something weird is going on." " The filter was hit without PAL_TRY being hit.\n"); } /* was the correct exception code passed? */ if (ep->ExceptionRecord->ExceptionCode != EXCEPTION_ARRAY_BOUNDS_EXCEEDED) { Fail("RaiseException: ERROR -> ep->ExceptionRecord->ExceptionCode" " was %x when it was expected to be %x\n", ep->ExceptionRecord->ExceptionCode, EXCEPTION_ARRAY_BOUNDS_EXCEEDED); } return EXCEPTION_EXECUTE_HANDLER; } PALTEST(exception_handling_RaiseException_test3_paltest_raiseexception_test3, "exception_handling/RaiseException/test3/paltest_raiseexception_test3") { bExcept_RaiseException_test3 = FALSE; if (0 != PAL_Initialize(argc, argv)) { return FAIL; } /******************************************************** * Test that the correct arguments are passed * to the filter by RaiseException */ PAL_TRY(VOID*, unused, NULL) { bTry_RaiseException_test3 = TRUE; /* indicate we hit the PAL_TRY block */ RaiseException(EXCEPTION_ARRAY_BOUNDS_EXCEEDED, 0, 0,NULL); Fail("RaiseException: ERROR -> code was executed after the " "exception was raised.\n"); } PAL_EXCEPT_FILTER(Filter_test1_RaiseException_test3) { if (!bTry_RaiseException_test3) { Fail("RaiseException: ERROR -> Something weird is going on." " PAL_EXCEPT_FILTER was hit without PAL_TRY being hit.\n"); } bExcept_RaiseException_test3 = TRUE; /* indicate we hit the PAL_EXCEPT_FILTER_EX block */ } PAL_ENDTRY; if (!bTry_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the " "PAL_TRY block was not executed.\n"); } if (!bExcept_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the " "PAL_EXCEPT_FILTER_EX block was not executed.\n"); } if (!bFilter_RaiseException_test3) { Trace("RaiseException: ERROR -> It appears the code in the" " filter function was not executed.\n"); } /* did we hit all the code blocks? */ if(!bTry_RaiseException_test3 || !bExcept_RaiseException_test3 || !bFilter_RaiseException_test3) { Fail(""); } PAL_Terminate(); return PASS; }
-1
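Stripped of the bookkeeping flags, the control-flow shape the RaiseException test above exercises reduces to the following; a minimal sketch assuming palsuite.h, with hypothetical names (MyFilter, Demo).

static LONG MyFilter(EXCEPTION_POINTERS* ep, VOID* unused)
{
    /* inspect ep->ExceptionRecord->ExceptionCode here to decide */
    return EXCEPTION_EXECUTE_HANDLER;
}

static void Demo()
{
    PAL_TRY(VOID*, unused, NULL)
    {
        RaiseException(EXCEPTION_ARRAY_BOUNDS_EXCEEDED, 0, 0, NULL);
        /* control never reaches here; the exception unwinds to the filter */
    }
    PAL_EXCEPT_FILTER(MyFilter)
    {
        /* runs because the filter returned EXCEPTION_EXECUTE_HANDLER */
    }
    PAL_ENDTRY;
}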
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/miscellaneous/IsBadWritePtr/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test2.c ** ** Purpose: ** Create three consecutive pages, NOACCESS, READWRITE and ** NOACCESS. Check to ensure that the READWRITE page returns 0, to ** ensure that IsBadWritePtr isn't overflowing. Also check the other two ** pages to see that they return non-zero. ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_IsBadWritePtr_test2_paltest_isbadwriteptr_test2, "miscellaneous/IsBadWritePtr/test2/paltest_isbadwriteptr_test2") { LPVOID PageOne, PageTwo, PageThree; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Reserve enough space for four pages. We'll commit this memory and set the correct access for each page below. */ PageOne = VirtualAlloc(NULL, GetOsPageSize()*4, MEM_RESERVE, PAGE_NOACCESS); if(PageOne == NULL) { Fail("ERROR: VirtualAlloc failed to reserve the required memory.\n"); } /* Set the first Page to PAGE_NOACCESS */ PageOne = VirtualAlloc(PageOne, GetOsPageSize(), MEM_COMMIT, PAGE_NOACCESS); if(PageOne == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to commit the required memory " "for the first page.\n"); } /* Set the second Page to PAGE_READWRITE */ PageTwo = VirtualAlloc(((BYTE*)PageOne)+GetOsPageSize(), GetOsPageSize(), MEM_COMMIT, PAGE_READWRITE); if(PageTwo == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to allocate the required memory " "for the second page. %d\n",GetLastError()); } /* Set the third Page to PAGE_NOACCESS */ PageThree = VirtualAlloc(((BYTE*)PageTwo) + (2 * GetOsPageSize()), GetOsPageSize(), MEM_COMMIT, PAGE_NOACCESS); if(PageThree == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to allocate the required memory " "for the third page.\n"); } /* Check that calling IsBadWritePtr on the first page returns non-zero */ if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: Called IsBadWritePtr on a page which was set NOACCESS " "but the return value was 0, indicating that the memory is " "writable.\n"); } /* Check that calling IsBadWritePtr on the middle page returns 0 */ if(IsBadWritePtr(PageTwo,GetOsPageSize()) != 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: IsBadWritePtr didn't return 0 when called on a " "page which should have been writable.\n"); } /* Check that calling IsBadWritePtr on the third page returns non-zero */ if(IsBadWritePtr(PageThree,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: Called IsBadWritePtr on a page which was set NOACCESS " "but the return value was 0, indicating that the memory is " "writable.\n"); } VirtualFree(PageOne,0,MEM_RELEASE); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test2.c ** ** Purpose: ** Create three consecutive pages, NOACCESS, READWRITE and ** NOACCESS. Check to ensure that the READWRITE page returns 0, to ** ensure that IsBadWritePtr isn't overflowing. Also check the other two ** pages to see that they return non-zero. ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_IsBadWritePtr_test2_paltest_isbadwriteptr_test2, "miscellaneous/IsBadWritePtr/test2/paltest_isbadwriteptr_test2") { LPVOID PageOne, PageTwo, PageThree; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Reserve enough space for four pages. We'll commit this memory and set the correct access for each page below. */ PageOne = VirtualAlloc(NULL, GetOsPageSize()*4, MEM_RESERVE, PAGE_NOACCESS); if(PageOne == NULL) { Fail("ERROR: VirtualAlloc failed to reserve the required memory.\n"); } /* Set the first Page to PAGE_NOACCESS */ PageOne = VirtualAlloc(PageOne, GetOsPageSize(), MEM_COMMIT, PAGE_NOACCESS); if(PageOne == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to commit the required memory " "for the first page.\n"); } /* Set the second Page to PAGE_READWRITE */ PageTwo = VirtualAlloc(((BYTE*)PageOne)+GetOsPageSize(), GetOsPageSize(), MEM_COMMIT, PAGE_READWRITE); if(PageTwo == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to allocate the required memory " "for the second page. %d\n",GetLastError()); } /* Set the third Page to PAGE_NOACCESS */ PageThree = VirtualAlloc(((BYTE*)PageTwo) + (2 * GetOsPageSize()), GetOsPageSize(), MEM_COMMIT, PAGE_NOACCESS); if(PageThree == NULL) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: VirtualAlloc failed to allocate the required memory " "for the third page.\n"); } /* Check that calling IsBadWritePtr on the first page returns non-zero */ if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: Called IsBadWritePtr on a page which was set NOACCESS " "but the return value was 0, indicating that the memory is " "writable.\n"); } /* Check that calling IsBadWritePtr on the middle page returns 0 */ if(IsBadWritePtr(PageTwo,GetOsPageSize()) != 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: IsBadWritePtr didn't return 0 when called on a " "page which should have been writable.\n"); } /* Check that calling IsBadWritePtr on the third page returns non-zero */ if(IsBadWritePtr(PageThree,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: Called IsBadWritePtr on a page which was set NOACCESS " "but the return value was 0, indicating that the memory is " "writable.\n"); } VirtualFree(PageOne,0,MEM_RELEASE); PAL_Terminate(); return PASS; }
-1
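The return convention the test above depends on, shown in isolation; a sketch assuming p points at a committed PAGE_READWRITE page.

/* IsBadWritePtr returns 0 when the whole range is writable, non-zero otherwise. */
if (IsBadWritePtr(p, GetOsPageSize()) == 0)
{
    /* every byte of [p, p + GetOsPageSize()) is safely writable */
}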
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/composite/object_management/mutex/nonshared/mutex.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source Code: main.c and mutex.c ** main.c creates process and waits for all processes to get over ** mutex.c creates a mutex and then calls threads which will contend for the mutex ** ** This test is for Object Management Test case for Mutex where Object type is not shareable. ** Algorithm ** o Create PROCESS_COUNT processes. ** o Main Thread of each process creates OBJECT_TYPE Object ** ** Author: ShamitP **============================================================ */ #include <palsuite.h> #include "resultbuffer.h" #include "resulttime.h" #define TIMEOUT 5000 /* Test Input Variables */ unsigned int USE_PROCESS_COUNT = 0; unsigned int THREAD_COUNT = 0; unsigned int REPEAT_COUNT = 0; unsigned int RELATION_ID = 1001; /* Capture statistics at per thread basis */ struct statistics{ unsigned int processId; unsigned int operationsFailed; unsigned int operationsPassed; unsigned int operationsTotal; DWORD operationTime; unsigned int relationId; }; struct ProcessStats{ unsigned int processId; DWORD operationTime; unsigned int relationId; }; HANDLE StartTestsEvHandle = NULL; HANDLE hMutexHandle = NULL; /* Results Buffer */ ResultBuffer *resultBuffer = NULL; int testStatus; void PALAPI Run_Thread_mutex_nonshared(LPVOID lpParam); int GetParameters( int argc, char **argv) { if( (argc != 5) || ((argc == 1) && !strcmp(argv[1],"/?")) || !strcmp(argv[1],"/h") || !strcmp(argv[1],"/H")) { printf("PAL -Composite Object Management Mutex Test\n"); printf("Usage:\n"); printf("mutex\n\t[USE_PROCESS_COUNT ( greater than 1] \n"); printf("\t[THREAD_COUNT ( greater than 1] \n"); printf("\t[REPEAT_COUNT ( greater than 1]\n"); printf("\t[RELATION_ID [greater than 1]\n"); return -1; } USE_PROCESS_COUNT = atoi(argv[1]); if( USE_PROCESS_COUNT < 0) { printf("\nInvalid USE_PROCESS_COUNT number, Pass greater than 1\n"); return -1; } THREAD_COUNT = atoi(argv[2]); if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } REPEAT_COUNT = atoi(argv[3]); if( REPEAT_COUNT < 1) { printf("\nInvalid REPEAT_COUNT number, Pass greater than 1\n"); return -1; } RELATION_ID = atoi(argv[4]); if( RELATION_ID < 1) { printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n"); return -1; } return 0; } PALTEST(composite_object_management_mutex_nonshared_paltest_mutex_nonshared, "composite/object_management/mutex/nonshared/paltest_mutex_nonshared") { unsigned int i = 0; HANDLE hThread[MAXIMUM_WAIT_OBJECTS]; DWORD threadId[MAXIMUM_WAIT_OBJECTS]; const char sTmpEventName[MAX_PATH] = "StartTestEvent"; DWORD dwParam = 0; int returnCode = 0; /* Variables to capture the file name and the file pointer at thread level*/ char fileName[MAX_PATH]; FILE *pFile = NULL; struct statistics* buffer = NULL; int statisticsSize = 0; /* Variables to capture the file name and the file pointer at process level*/ char processFileName[MAX_PATH]; FILE *pProcessFile = NULL; struct ProcessStats processStats; DWORD dwStartTime; testStatus = PASS; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(GetParameters(argc, argv)) { Fail("Error in obtaining the parameters\n"); } /* Register the start time */ dwStartTime = GetTickCount(); processStats.relationId = RELATION_ID; processStats.processId = USE_PROCESS_COUNT; 
_snprintf(processFileName, MAX_PATH, "%d_process_mutex_%d_.txt", USE_PROCESS_COUNT,RELATION_ID); pProcessFile = fopen(processFileName, "w+"); if(pProcessFile == NULL) { Fail("Error in opening process File file for write for process [%d]\n", USE_PROCESS_COUNT); } statisticsSize = sizeof(struct statistics); _snprintf(fileName, MAX_PATH, "%d_thread_mutex_%d_.txt", USE_PROCESS_COUNT, RELATION_ID); pFile = fopen(fileName, "w+"); if(pFile == NULL) { Fail("Error in opening file for write for process [%d]\n", USE_PROCESS_COUNT); } // For each thread we will log operations failed (int), passed (int), total (int) // and number of ticks (DWORD) for the operations resultBuffer = new ResultBuffer( THREAD_COUNT, statisticsSize); StartTestsEvHandle = CreateEvent( NULL, /* lpEventAttributes*/ TRUE, /* bManualReset */ FALSE, /* bInitialState */ NULL ); /* name of Event */ if( StartTestsEvHandle == NULL ) { Fail("Error:%d: Unexpected failure " "to create %s Event for process count %d\n", GetLastError(), sTmpEventName, USE_PROCESS_COUNT ); } /* Create StartTest Event */ hMutexHandle = CreateMutex( NULL, FALSE, /* bInitialOwner, owns initially */ NULL ); if( hMutexHandle == NULL) { Fail("Unable to create Mutex handle for process id [%d], returned error [%d]\n", i, GetLastError()); } /* We already assume that the mutex was created previously*/ for( i = 0; i < THREAD_COUNT; i++ ) { dwParam = (int) i; //Create thread hThread[i] = CreateThread( NULL, /* no security attributes */ 0, /* use default stack size */ (LPTHREAD_START_ROUTINE)Run_Thread_mutex_nonshared,/* thread function */ (LPVOID)dwParam, /* argument to thread function */ 0, /* use default creation flags */ &threadId[i] /* returns the thread identifier*/ ); if(hThread[i] == NULL) { Fail("Create Thread failed for %d process, and GetLastError value is %d\n", USE_PROCESS_COUNT, GetLastError()); } } if (!SetEvent(StartTestsEvHandle)) { Fail("Set Event for Start Tests failed for %d process, and GetLastError value is %d\n", USE_PROCESS_COUNT, GetLastError()); } /* Test running */ returnCode = WaitForMultipleObjects( THREAD_COUNT, hThread, TRUE, INFINITE); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) for %d process returned %d, and GetLastError value is %d\n", USE_PROCESS_COUNT, returnCode, GetLastError()); testStatus = FAIL; } processStats.operationTime = GetTimeDiff(dwStartTime); /* Write to a file*/ if(pFile!= NULL) { for( i = 0; i < THREAD_COUNT; i++ ) { buffer = (struct statistics *)resultBuffer->getResultBuffer(i); returnCode = fprintf(pFile, "%d,%d,%d,%d,%lu,%d\n", buffer->processId, buffer->operationsFailed, buffer->operationsPassed, buffer->operationsTotal, buffer->operationTime, buffer->relationId ); } } fclose(pFile); fprintf(pProcessFile, "%d,%d,%d\n", USE_PROCESS_COUNT, processStats.operationTime, processStats.relationId ); fclose(pProcessFile); /* Logging for the test case over, clean up the handles */ for( i = 0; i < THREAD_COUNT; i++ ) { if(!CloseHandle(hThread[i]) ) { Trace("Error:%d: CloseHandle failed for Process [%d] hThread[%d]\n", GetLastError(), USE_PROCESS_COUNT, i); testStatus = FAIL; } } if(!CloseHandle(StartTestsEvHandle)) { Trace("Error:%d: CloseHandle failed for Process [%d] StartTestsEvHandle\n", GetLastError(), USE_PROCESS_COUNT); testStatus = FAIL; } if(!CloseHandle(hMutexHandle)) { Trace("Error:%d: CloseHandle failed for Process [%d] hMutexHandle\n", GetLastError(), USE_PROCESS_COUNT); testStatus = FAIL; } PAL_Terminate(); return testStatus; } void PALAPI Run_Thread_mutex_nonshared (LPVOID lpParam) { unsigned 
int i = 0; DWORD dwWaitResult; struct statistics stats; DWORD dwStartTime; stats.relationId = RELATION_ID; stats.processId = USE_PROCESS_COUNT; stats.operationsFailed = 0; stats.operationsPassed = 0; stats.operationsTotal = 0; stats.operationTime = 0; int Id=(int)lpParam; dwWaitResult = WaitForSingleObject( StartTestsEvHandle, // handle to mutex TIMEOUT); if(dwWaitResult != WAIT_OBJECT_0) { Trace("Error while waiting for StartTest Event@ thread %d\n", Id); testStatus = FAIL; } dwStartTime = GetTickCount(); for( i = 0; i < REPEAT_COUNT; i++ ) { dwWaitResult = WaitForSingleObject( hMutexHandle, // handle to mutex TIMEOUT); if(dwWaitResult != WAIT_OBJECT_0) { stats.operationsFailed += 1; stats.operationsTotal += 1; testStatus = FAIL; continue; } if (! ReleaseMutex(hMutexHandle)) { // Deal with error. stats.operationsFailed += 1; stats.operationsTotal += 1; // Probably need to have while true loop to attempt to release mutex... testStatus = FAIL; continue; } stats.operationsTotal += 1; stats.operationsPassed += 1; } stats.operationTime = GetTimeDiff(dwStartTime); if(resultBuffer->LogResult(Id, (char *)&stats)) { Fail("Error:%d: while writing to shared memory, Thread Id is[%d] and Process id is [%d]\n", GetLastError(), Id, USE_PROCESS_COUNT); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source Code: main.c and mutex.c ** main.c creates process and waits for all processes to get over ** mutex.c creates a mutex and then calls threads which will contend for the mutex ** ** This test is for Object Management Test case for Mutex where Object type is not shareable. ** Algorithm ** o Create PROCESS_COUNT processes. ** o Main Thread of each process creates OBJECT_TYPE Object ** ** Author: ShamitP **============================================================ */ #include <palsuite.h> #include "resultbuffer.h" #include "resulttime.h" #define TIMEOUT 5000 /* Test Input Variables */ unsigned int USE_PROCESS_COUNT = 0; unsigned int THREAD_COUNT = 0; unsigned int REPEAT_COUNT = 0; unsigned int RELATION_ID = 1001; /* Capture statistics at per thread basis */ struct statistics{ unsigned int processId; unsigned int operationsFailed; unsigned int operationsPassed; unsigned int operationsTotal; DWORD operationTime; unsigned int relationId; }; struct ProcessStats{ unsigned int processId; DWORD operationTime; unsigned int relationId; }; HANDLE StartTestsEvHandle = NULL; HANDLE hMutexHandle = NULL; /* Results Buffer */ ResultBuffer *resultBuffer = NULL; int testStatus; void PALAPI Run_Thread_mutex_nonshared(LPVOID lpParam); int GetParameters( int argc, char **argv) { if( (argc != 5) || ((argc == 1) && !strcmp(argv[1],"/?")) || !strcmp(argv[1],"/h") || !strcmp(argv[1],"/H")) { printf("PAL -Composite Object Management Mutex Test\n"); printf("Usage:\n"); printf("mutex\n\t[USE_PROCESS_COUNT ( greater than 1] \n"); printf("\t[THREAD_COUNT ( greater than 1] \n"); printf("\t[REPEAT_COUNT ( greater than 1]\n"); printf("\t[RELATION_ID [greater than 1]\n"); return -1; } USE_PROCESS_COUNT = atoi(argv[1]); if( USE_PROCESS_COUNT < 0) { printf("\nInvalid USE_PROCESS_COUNT number, Pass greater than 1\n"); return -1; } THREAD_COUNT = atoi(argv[2]); if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } REPEAT_COUNT = atoi(argv[3]); if( REPEAT_COUNT < 1) { printf("\nInvalid REPEAT_COUNT number, Pass greater than 1\n"); return -1; } RELATION_ID = atoi(argv[4]); if( RELATION_ID < 1) { printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n"); return -1; } return 0; } PALTEST(composite_object_management_mutex_nonshared_paltest_mutex_nonshared, "composite/object_management/mutex/nonshared/paltest_mutex_nonshared") { unsigned int i = 0; HANDLE hThread[MAXIMUM_WAIT_OBJECTS]; DWORD threadId[MAXIMUM_WAIT_OBJECTS]; const char sTmpEventName[MAX_PATH] = "StartTestEvent"; DWORD dwParam = 0; int returnCode = 0; /* Variables to capture the file name and the file pointer at thread level*/ char fileName[MAX_PATH]; FILE *pFile = NULL; struct statistics* buffer = NULL; int statisticsSize = 0; /* Variables to capture the file name and the file pointer at process level*/ char processFileName[MAX_PATH]; FILE *pProcessFile = NULL; struct ProcessStats processStats; DWORD dwStartTime; testStatus = PASS; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(GetParameters(argc, argv)) { Fail("Error in obtaining the parameters\n"); } /* Register the start time */ dwStartTime = GetTickCount(); processStats.relationId = RELATION_ID; processStats.processId = USE_PROCESS_COUNT; 
_snprintf(processFileName, MAX_PATH, "%d_process_mutex_%d_.txt", USE_PROCESS_COUNT,RELATION_ID); pProcessFile = fopen(processFileName, "w+"); if(pProcessFile == NULL) { Fail("Error in opening process File file for write for process [%d]\n", USE_PROCESS_COUNT); } statisticsSize = sizeof(struct statistics); _snprintf(fileName, MAX_PATH, "%d_thread_mutex_%d_.txt", USE_PROCESS_COUNT, RELATION_ID); pFile = fopen(fileName, "w+"); if(pFile == NULL) { Fail("Error in opening file for write for process [%d]\n", USE_PROCESS_COUNT); } // For each thread we will log operations failed (int), passed (int), total (int) // and number of ticks (DWORD) for the operations resultBuffer = new ResultBuffer( THREAD_COUNT, statisticsSize); StartTestsEvHandle = CreateEvent( NULL, /* lpEventAttributes*/ TRUE, /* bManualReset */ FALSE, /* bInitialState */ NULL ); /* name of Event */ if( StartTestsEvHandle == NULL ) { Fail("Error:%d: Unexpected failure " "to create %s Event for process count %d\n", GetLastError(), sTmpEventName, USE_PROCESS_COUNT ); } /* Create StartTest Event */ hMutexHandle = CreateMutex( NULL, FALSE, /* bInitialOwner, owns initially */ NULL ); if( hMutexHandle == NULL) { Fail("Unable to create Mutex handle for process id [%d], returned error [%d]\n", i, GetLastError()); } /* We already assume that the mutex was created previously*/ for( i = 0; i < THREAD_COUNT; i++ ) { dwParam = (int) i; //Create thread hThread[i] = CreateThread( NULL, /* no security attributes */ 0, /* use default stack size */ (LPTHREAD_START_ROUTINE)Run_Thread_mutex_nonshared,/* thread function */ (LPVOID)dwParam, /* argument to thread function */ 0, /* use default creation flags */ &threadId[i] /* returns the thread identifier*/ ); if(hThread[i] == NULL) { Fail("Create Thread failed for %d process, and GetLastError value is %d\n", USE_PROCESS_COUNT, GetLastError()); } } if (!SetEvent(StartTestsEvHandle)) { Fail("Set Event for Start Tests failed for %d process, and GetLastError value is %d\n", USE_PROCESS_COUNT, GetLastError()); } /* Test running */ returnCode = WaitForMultipleObjects( THREAD_COUNT, hThread, TRUE, INFINITE); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) for %d process returned %d, and GetLastError value is %d\n", USE_PROCESS_COUNT, returnCode, GetLastError()); testStatus = FAIL; } processStats.operationTime = GetTimeDiff(dwStartTime); /* Write to a file*/ if(pFile!= NULL) { for( i = 0; i < THREAD_COUNT; i++ ) { buffer = (struct statistics *)resultBuffer->getResultBuffer(i); returnCode = fprintf(pFile, "%d,%d,%d,%d,%lu,%d\n", buffer->processId, buffer->operationsFailed, buffer->operationsPassed, buffer->operationsTotal, buffer->operationTime, buffer->relationId ); } } fclose(pFile); fprintf(pProcessFile, "%d,%d,%d\n", USE_PROCESS_COUNT, processStats.operationTime, processStats.relationId ); fclose(pProcessFile); /* Logging for the test case over, clean up the handles */ for( i = 0; i < THREAD_COUNT; i++ ) { if(!CloseHandle(hThread[i]) ) { Trace("Error:%d: CloseHandle failed for Process [%d] hThread[%d]\n", GetLastError(), USE_PROCESS_COUNT, i); testStatus = FAIL; } } if(!CloseHandle(StartTestsEvHandle)) { Trace("Error:%d: CloseHandle failed for Process [%d] StartTestsEvHandle\n", GetLastError(), USE_PROCESS_COUNT); testStatus = FAIL; } if(!CloseHandle(hMutexHandle)) { Trace("Error:%d: CloseHandle failed for Process [%d] hMutexHandle\n", GetLastError(), USE_PROCESS_COUNT); testStatus = FAIL; } PAL_Terminate(); return testStatus; } void PALAPI Run_Thread_mutex_nonshared (LPVOID lpParam) { unsigned 
int i = 0; DWORD dwWaitResult; struct statistics stats; DWORD dwStartTime; stats.relationId = RELATION_ID; stats.processId = USE_PROCESS_COUNT; stats.operationsFailed = 0; stats.operationsPassed = 0; stats.operationsTotal = 0; stats.operationTime = 0; int Id=(int)lpParam; dwWaitResult = WaitForSingleObject( StartTestsEvHandle, // handle to mutex TIMEOUT); if(dwWaitResult != WAIT_OBJECT_0) { Trace("Error while waiting for StartTest Event@ thread %d\n", Id); testStatus = FAIL; } dwStartTime = GetTickCount(); for( i = 0; i < REPEAT_COUNT; i++ ) { dwWaitResult = WaitForSingleObject( hMutexHandle, // handle to mutex TIMEOUT); if(dwWaitResult != WAIT_OBJECT_0) { stats.operationsFailed += 1; stats.operationsTotal += 1; testStatus = FAIL; continue; } if (! ReleaseMutex(hMutexHandle)) { // Deal with error. stats.operationsFailed += 1; stats.operationsTotal += 1; // Probably need to have while true loop to attempt to release mutex... testStatus = FAIL; continue; } stats.operationsTotal += 1; stats.operationsPassed += 1; } stats.operationTime = GetTimeDiff(dwStartTime); if(resultBuffer->LogResult(Id, (char *)&stats)) { Fail("Error:%d: while writing to shared memory, Thread Id is[%d] and Process id is [%d]\n", GetLastError(), Id, USE_PROCESS_COUNT); } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/miscellaneous/InterlockedBit/test2/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for InterlockedBitTestAndSet() function ** ** **=========================================================*/ #include <palsuite.h> typedef struct tag_TEST_DATA { LONG baseValue; UINT bitPosition; LONG expectedValue; UCHAR expectedReturnValue; } TEST_DATA; PALTEST(miscellaneous_InterlockedBit_test2_paltest_interlockedbit_test2, "miscellaneous/InterlockedBit/test2/paltest_interlockedbit_test2") { TEST_DATA test_data[] = { { (LONG)0x00000000, 2, (LONG)0x00000004, 0 }, { (LONG)0x12341234, 2, (LONG)0x12341234, 1 }, { (LONG)0x12341234, 3, (LONG)0x1234123c, 0 }, { (LONG)0x12341234, 31, (LONG)0x92341234, 0 }, { (LONG)0x12341234, 28, (LONG)0x12341234, 1 }, { (LONG)0xffffffff, 28, (LONG)0xffffffff, 1 } }; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } for (int i = 0; i < sizeof (test_data) / sizeof (TEST_DATA); i++) { LONG baseVal = test_data[i].baseValue; LONG bitPosition = test_data[i].bitPosition; UCHAR ret = InterlockedBitTestAndSet( &baseVal, /* Variable to manipulate */ bitPosition); if (ret != test_data[i].expectedReturnValue) { Fail("ERROR: InterlockedBitTestAndSet(%d): Expected return value is %d," "Actual return value is %d.", i, test_data[i].expectedReturnValue, ret); } if (baseVal != test_data[i].expectedValue) { Fail("ERROR: InterlockedBitTestAndSet(%d): Expected value is %x," "Actual value is %x.", i, test_data[i].expectedValue, baseVal); } } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for InterlockedBitTestAndSet() function ** ** **=========================================================*/ #include <palsuite.h> typedef struct tag_TEST_DATA { LONG baseValue; UINT bitPosition; LONG expectedValue; UCHAR expectedReturnValue; } TEST_DATA; PALTEST(miscellaneous_InterlockedBit_test2_paltest_interlockedbit_test2, "miscellaneous/InterlockedBit/test2/paltest_interlockedbit_test2") { TEST_DATA test_data[] = { { (LONG)0x00000000, 2, (LONG)0x00000004, 0 }, { (LONG)0x12341234, 2, (LONG)0x12341234, 1 }, { (LONG)0x12341234, 3, (LONG)0x1234123c, 0 }, { (LONG)0x12341234, 31, (LONG)0x92341234, 0 }, { (LONG)0x12341234, 28, (LONG)0x12341234, 1 }, { (LONG)0xffffffff, 28, (LONG)0xffffffff, 1 } }; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } for (int i = 0; i < sizeof (test_data) / sizeof (TEST_DATA); i++) { LONG baseVal = test_data[i].baseValue; LONG bitPosition = test_data[i].bitPosition; UCHAR ret = InterlockedBitTestAndSet( &baseVal, /* Variable to manipulate */ bitPosition); if (ret != test_data[i].expectedReturnValue) { Fail("ERROR: InterlockedBitTestAndSet(%d): Expected return value is %d," "Actual return value is %d.", i, test_data[i].expectedReturnValue, ret); } if (baseVal != test_data[i].expectedValue) { Fail("ERROR: InterlockedBitTestAndSet(%d): Expected value is %x," "Actual value is %x.", i, test_data[i].expectedValue, baseVal); } } PAL_Terminate(); return PASS; }
-1
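The expected values in the test_data table above follow directly from the documented semantics: InterlockedBitTestAndSet atomically sets the requested bit and returns that bit's previous value. A small worked instance matching the third row of the table:

LONG value = 0x12341234;
UCHAR was = InterlockedBitTestAndSet(&value, 3);
/* was == 0 : bit 3 of 0x12341234 was clear before the call */
/* value == 0x1234123c : bit 3 (mask 0x8) is now set */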
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/native/libs/System.Native/pal_autoreleasepool.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_compiler.h" #include "pal_types.h" /** * Creates a pool to automatically release applicable ref-counted resources. */ PALEXPORT void* SystemNative_CreateAutoreleasePool(void); /** * Drains and releases a pool created by SystemNative_CreateAutoreleasePool. */ PALEXPORT void SystemNative_DrainAutoreleasePool(void* pool); /** * Ensure that NSThread is in multi-threading mode when POSIX APIs are used to * start new threads. */ void EnsureNSThreadIsMultiThreaded(void);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_compiler.h" #include "pal_types.h" /** * Creates a pool to automatically release applicable ref-counted resources. */ PALEXPORT void* SystemNative_CreateAutoreleasePool(void); /** * Drains and releases a pool created by SystemNative_CreateAutoreleasePool. */ PALEXPORT void SystemNative_DrainAutoreleasePool(void* pool); /** * Ensure that NSThread is in multi-threading mode when POSIX APIs are used to * start new threads. */ void EnsureNSThreadIsMultiThreaded(void);
-1
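A minimal usage sketch for the two pool exports declared above, grounded only in their doc comments; the work done inside the pool and any error handling are elided.

void* pool = SystemNative_CreateAutoreleasePool();
/* ... run code that may autorelease ref-counted resources ... */
SystemNative_DrainAutoreleasePool(pool);   /* drains and releases the pool */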
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/tests/palsuite/threading/WaitForSingleObject/WFSOMutexTest/WFSOMutexTest.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: WFSOMutexTest.c ** ** Purpose: Test for WaitForSingleObjectTest. ** Create Mutex Object ** Create Two Threads, Each Threads does WFSO for the Mutex Object ** Increments Counter ** Releases Mutex ** Test Passes if the above operations are successful ** ** ** **=========================================================*/ #include <palsuite.h> #define NUMBER_OF_WORKER_THREADS 2 //Declaring Variables HANDLE hMutex_WFSOMutexTest = NULL; unsigned int globalcounter_WFSOMutexTest =0; int testReturnCode_WFSOMutexTest = PASS; //Declaring Function Prototypes DWORD PALAPI WFSOMutexTest(LPVOID params); void incrementCounter_WFSOMutexTest(void); PALTEST(threading_WaitForSingleObject_WFSOMutexTest_paltest_waitforsingleobject_wfsomutextest, "threading/WaitForSingleObject/WFSOMutexTest/paltest_waitforsingleobject_wfsomutextest") { //Declare local variables int i =0; // 2 dimensional array to hold thread handles for each worker thread HANDLE hThread[NUMBER_OF_WORKER_THREADS]; DWORD dwThreadId=0; int returnCode = 0; //Initialize PAL if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } //Create Mutex hMutex_WFSOMutexTest = CreateMutex(NULL, // no security attributes FALSE, // initially not owned NULL); // name of mutex //Check for Mutex Creation if (hMutex_WFSOMutexTest == NULL) { Fail("Create Mutex Failed, GetLastError: %d\n", GetLastError()); } //Spawn 2 worker threads for (i=0;i<NUMBER_OF_WORKER_THREADS;i++) { //Create Thread hThread[i] = CreateThread( NULL, 0, WFSOMutexTest, NULL, 0, &dwThreadId); if ( NULL == hThread[i] ) { Fail ( "CreateThread() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } } /* Test running */ returnCode = WaitForMultipleObjects( NUMBER_OF_WORKER_THREADS, hThread, TRUE, 5000); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) returned %d, and GetLastError value is %d\n", returnCode, GetLastError()); testReturnCode_WFSOMutexTest = FAIL; } //Close thread handles for (i=0;i<NUMBER_OF_WORKER_THREADS;i++) { if (0==CloseHandle(hThread[i])) { Trace("Could not Close thread handle\n"); Fail ( "GetLastError returned %d\n", GetLastError()); } } //Close Mutex Handle if (0==CloseHandle(hMutex_WFSOMutexTest)) { Trace("Could not close mutex handle\n"); Fail ( "GetLastError returned %d\n", GetLastError()); } PAL_TerminateEx(testReturnCode_WFSOMutexTest); return ( testReturnCode_WFSOMutexTest ); } void incrementCounter_WFSOMutexTest(void) { if (INT_MAX == globalcounter_WFSOMutexTest) { globalcounter_WFSOMutexTest = 0; } globalcounter_WFSOMutexTest++; Trace("Global Counter Value: %d \n", globalcounter_WFSOMutexTest); } DWORD PALAPI WFSOMutexTest(LPVOID params) { DWORD dwWaitResult; // Request ownership of mutex. dwWaitResult = WaitForSingleObject( hMutex_WFSOMutexTest, // handle to mutex 5000L); // five-second time-out interval switch (dwWaitResult) { // The thread got mutex ownership. case WAIT_OBJECT_0: { incrementCounter_WFSOMutexTest(); //Release ownership of the mutex object. if (! ReleaseMutex(hMutex_WFSOMutexTest)) { Fail ( "ReleaseMutex() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } break; } // Cannot get mutex ownership due to time-out. case WAIT_TIMEOUT: { Fail ( "Cannot get mutex ownership due to time-out. 
Failing test.\n" "GetLastError returned %d\n", GetLastError()); return FALSE; } // Got ownership of the abandoned mutex object. case WAIT_ABANDONED: { Fail ( "Got ownership of the abandoned mutex object. Failing test.\n" "GetLastError returned %d\n", GetLastError()); return FALSE; } } return 1; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: WFSOMutexTest.c ** ** Purpose: Test for WaitForSingleObjectTest. ** Create Mutex Object ** Create Two Threads, Each Threads does WFSO for the Mutex Object ** Increments Counter ** Releases Mutex ** Test Passes if the above operations are successful ** ** ** **=========================================================*/ #include <palsuite.h> #define NUMBER_OF_WORKER_THREADS 2 //Declaring Variables HANDLE hMutex_WFSOMutexTest = NULL; unsigned int globalcounter_WFSOMutexTest =0; int testReturnCode_WFSOMutexTest = PASS; //Declaring Function Prototypes DWORD PALAPI WFSOMutexTest(LPVOID params); void incrementCounter_WFSOMutexTest(void); PALTEST(threading_WaitForSingleObject_WFSOMutexTest_paltest_waitforsingleobject_wfsomutextest, "threading/WaitForSingleObject/WFSOMutexTest/paltest_waitforsingleobject_wfsomutextest") { //Declare local variables int i =0; // 2 dimensional array to hold thread handles for each worker thread HANDLE hThread[NUMBER_OF_WORKER_THREADS]; DWORD dwThreadId=0; int returnCode = 0; //Initialize PAL if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } //Create Mutex hMutex_WFSOMutexTest = CreateMutex(NULL, // no security attributes FALSE, // initially not owned NULL); // name of mutex //Check for Mutex Creation if (hMutex_WFSOMutexTest == NULL) { Fail("Create Mutex Failed, GetLastError: %d\n", GetLastError()); } //Spawn 2 worker threads for (i=0;i<NUMBER_OF_WORKER_THREADS;i++) { //Create Thread hThread[i] = CreateThread( NULL, 0, WFSOMutexTest, NULL, 0, &dwThreadId); if ( NULL == hThread[i] ) { Fail ( "CreateThread() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } } /* Test running */ returnCode = WaitForMultipleObjects( NUMBER_OF_WORKER_THREADS, hThread, TRUE, 5000); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) returned %d, and GetLastError value is %d\n", returnCode, GetLastError()); testReturnCode_WFSOMutexTest = FAIL; } //Close thread handles for (i=0;i<NUMBER_OF_WORKER_THREADS;i++) { if (0==CloseHandle(hThread[i])) { Trace("Could not Close thread handle\n"); Fail ( "GetLastError returned %d\n", GetLastError()); } } //Close Mutex Handle if (0==CloseHandle(hMutex_WFSOMutexTest)) { Trace("Could not close mutex handle\n"); Fail ( "GetLastError returned %d\n", GetLastError()); } PAL_TerminateEx(testReturnCode_WFSOMutexTest); return ( testReturnCode_WFSOMutexTest ); } void incrementCounter_WFSOMutexTest(void) { if (INT_MAX == globalcounter_WFSOMutexTest) { globalcounter_WFSOMutexTest = 0; } globalcounter_WFSOMutexTest++; Trace("Global Counter Value: %d \n", globalcounter_WFSOMutexTest); } DWORD PALAPI WFSOMutexTest(LPVOID params) { DWORD dwWaitResult; // Request ownership of mutex. dwWaitResult = WaitForSingleObject( hMutex_WFSOMutexTest, // handle to mutex 5000L); // five-second time-out interval switch (dwWaitResult) { // The thread got mutex ownership. case WAIT_OBJECT_0: { incrementCounter_WFSOMutexTest(); //Release ownership of the mutex object. if (! ReleaseMutex(hMutex_WFSOMutexTest)) { Fail ( "ReleaseMutex() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } break; } // Cannot get mutex ownership due to time-out. case WAIT_TIMEOUT: { Fail ( "Cannot get mutex ownership due to time-out. 
Failing test.\n" "GetLastError returned %d\n", GetLastError()); return FALSE; } // Got ownership of the abandoned mutex object. case WAIT_ABANDONED: { Fail ( "Got ownership of the abandoned mutex object. Failing test.\n" "GetLastError returned %d\n", GetLastError()); return FALSE; } } return 1; }
-1
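One wait result worth spelling out from the switch in the test above: WAIT_ABANDONED still grants ownership of the mutex; it signals that the previous owner exited without releasing it. A hedged sketch of defensive handling, assuming hMutex is a valid mutex handle:

DWORD r = WaitForSingleObject(hMutex, 5000L);
if (r == WAIT_ABANDONED)
{
    /* ownership WAS acquired; state the mutex guarded may be inconsistent */
    ReleaseMutex(hMutex);
}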
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/src/libunwind/include/tdep-aarch64/libunwind_i.h
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> Copyright (C) 2013 Linaro Limited This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef AARCH64_LIBUNWIND_I_H #define AARCH64_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. */ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> #include "elf64.h" #include "mempool.h" #include "dwarf.h" typedef enum { UNW_AARCH64_FRAME_STANDARD = -2, /* regular fp, sp +/- offset */ UNW_AARCH64_FRAME_SIGRETURN = -1, /* special sigreturn frame */ UNW_AARCH64_FRAME_OTHER = 0, /* not cacheable (special or unrecognised) */ UNW_AARCH64_FRAME_GUESSED = 1 /* guessed it was regular, but not known */ } unw_tdep_frame_type_t; typedef struct { uint64_t virtual_address; int64_t frame_type : 2; /* unw_tdep_frame_type_t classification */ int64_t last_frame : 1; /* non-zero if last frame in chain */ int64_t cfa_reg_sp : 1; /* cfa dwarf base register is sp vs. 
fp */ int64_t cfa_reg_offset : 30; /* cfa is at this offset from base register value */ int64_t fp_cfa_offset : 30; /* fp saved at this offset from cfa (-1 = not saved) */ int64_t lr_cfa_offset : 30; /* lr saved at this offset from cfa (-1 = not saved) */ int64_t sp_cfa_offset : 30; /* sp saved at this offset from cfa (-1 = not saved) */ } unw_tdep_frame_t; #ifdef UNW_LOCAL_ONLY typedef unw_word_t aarch64_loc_t; #else /* !UNW_LOCAL_ONLY */ typedef struct aarch64_loc { unw_word_t w0, w1; } aarch64_loc_t; #endif /* !UNW_LOCAL_ONLY */ struct unw_addr_space { struct unw_accessors acc; int big_endian; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; struct cursor { struct dwarf_cursor dwarf; /* must be first */ unw_tdep_frame_t frame_info; /* quick tracing assist info */ enum { AARCH64_SCF_NONE, AARCH64_SCF_LINUX_RT_SIGFRAME, } sigcontext_format; unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; int validate; ucontext_t *uc; }; static inline ucontext_t * dwarf_get_uc(const struct dwarf_cursor *cursor) { const struct cursor *c = (struct cursor *) cursor->as_arg; return c->uc; } #define DWARF_GET_LOC(l) ((l).val) #ifdef UNW_LOCAL_ONLY # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) }) # define DWARF_IS_REG_LOC(l) 0 # define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) # define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(unw_fpreg_t *) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(unw_fpreg_t *) DWARF_GET_LOC (loc) = val; return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(unw_word_t *) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(unw_word_t *) DWARF_GET_LOC (loc) = val; return 0; } #else /* !UNW_LOCAL_ONLY */ # define DWARF_LOC_TYPE_FP (1 << 0) # define DWARF_LOC_TYPE_REG (1 << 1) # define DWARF_NULL_LOC DWARF_LOC (0, 0) static inline int dwarf_is_null_loc(dwarf_loc_t l) { return l.val == 0 && l.type == 0; } # define DWARF_IS_NULL_LOC(l) dwarf_is_null_loc(l) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) # define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) # define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) # define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) # define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { char *valp = (char *) &val; unw_word_t addr; int ret; if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); addr = 
DWARF_GET_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; return (*c->as->acc.access_mem) (c->as, addr + 4, (unw_word_t *) valp + 1, 0, c->as_arg); } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { char *valp = (char *) &val; unw_word_t addr; int ret; if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); addr = DWARF_GET_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; return (*c->as->acc.access_mem) (c->as, addr + 4, (unw_word_t *) valp + 1, 1, c->as_arg); } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); else return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); else return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #endif /* !UNW_LOCAL_ONLY */ #define tdep_getcontext_trace UNW_ARCH_OBJ(getcontext_trace) #define tdep_init_done UNW_OBJ(init_done) #define tdep_init_mem_validate UNW_OBJ(init_mem_validate) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_uc_addr UNW_OBJ(uc_addr) #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,frame) do {} while(0) #define tdep_stash_frame UNW_OBJ(tdep_stash_frame) #define tdep_trace UNW_OBJ(tdep_trace) #ifdef UNW_LOCAL_ONLY # define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else # define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) #define tdep_big_endian(as) ((as)->big_endian) extern atomic_bool tdep_init_done; extern void tdep_init (void); extern void tdep_init_mem_validate (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); extern int tdep_trace (unw_cursor_t *cursor, void **addresses, int *n); extern void tdep_stash_frame (struct dwarf_cursor *c, struct dwarf_reg_state *rs); extern int tdep_getcontext_trace (unw_tdep_context_t *); #endif /* AARCH64_LIBUNWIND_I_H */
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> Copyright (C) 2013 Linaro Limited This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef AARCH64_LIBUNWIND_I_H #define AARCH64_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. */ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> #include "elf64.h" #include "mempool.h" #include "dwarf.h" typedef enum { UNW_AARCH64_FRAME_STANDARD = -2, /* regular fp, sp +/- offset */ UNW_AARCH64_FRAME_SIGRETURN = -1, /* special sigreturn frame */ UNW_AARCH64_FRAME_OTHER = 0, /* not cacheable (special or unrecognised) */ UNW_AARCH64_FRAME_GUESSED = 1 /* guessed it was regular, but not known */ } unw_tdep_frame_type_t; typedef struct { uint64_t virtual_address; int64_t frame_type : 2; /* unw_tdep_frame_type_t classification */ int64_t last_frame : 1; /* non-zero if last frame in chain */ int64_t cfa_reg_sp : 1; /* cfa dwarf base register is sp vs. 
fp */ int64_t cfa_reg_offset : 30; /* cfa is at this offset from base register value */ int64_t fp_cfa_offset : 30; /* fp saved at this offset from cfa (-1 = not saved) */ int64_t lr_cfa_offset : 30; /* lr saved at this offset from cfa (-1 = not saved) */ int64_t sp_cfa_offset : 30; /* sp saved at this offset from cfa (-1 = not saved) */ } unw_tdep_frame_t; #ifdef UNW_LOCAL_ONLY typedef unw_word_t aarch64_loc_t; #else /* !UNW_LOCAL_ONLY */ typedef struct aarch64_loc { unw_word_t w0, w1; } aarch64_loc_t; #endif /* !UNW_LOCAL_ONLY */ struct unw_addr_space { struct unw_accessors acc; int big_endian; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; struct cursor { struct dwarf_cursor dwarf; /* must be first */ unw_tdep_frame_t frame_info; /* quick tracing assist info */ enum { AARCH64_SCF_NONE, AARCH64_SCF_LINUX_RT_SIGFRAME, } sigcontext_format; unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; int validate; ucontext_t *uc; }; static inline ucontext_t * dwarf_get_uc(const struct dwarf_cursor *cursor) { const struct cursor *c = (struct cursor *) cursor->as_arg; return c->uc; } #define DWARF_GET_LOC(l) ((l).val) #ifdef UNW_LOCAL_ONLY # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) }) # define DWARF_IS_REG_LOC(l) 0 # define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) # define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(unw_fpreg_t *) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(unw_fpreg_t *) DWARF_GET_LOC (loc) = val; return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(unw_word_t *) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(unw_word_t *) DWARF_GET_LOC (loc) = val; return 0; } #else /* !UNW_LOCAL_ONLY */ # define DWARF_LOC_TYPE_FP (1 << 0) # define DWARF_LOC_TYPE_REG (1 << 1) # define DWARF_NULL_LOC DWARF_LOC (0, 0) static inline int dwarf_is_null_loc(dwarf_loc_t l) { return l.val == 0 && l.type == 0; } # define DWARF_IS_NULL_LOC(l) dwarf_is_null_loc(l) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) # define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) # define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) # define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) # define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { char *valp = (char *) &val; unw_word_t addr; int ret; if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); addr = 
DWARF_GET_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; return (*c->as->acc.access_mem) (c->as, addr + 4, (unw_word_t *) valp + 1, 0, c->as_arg); } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { char *valp = (char *) &val; unw_word_t addr; int ret; if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); addr = DWARF_GET_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; return (*c->as->acc.access_mem) (c->as, addr + 4, (unw_word_t *) valp + 1, 1, c->as_arg); } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); else return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); else return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #endif /* !UNW_LOCAL_ONLY */ #define tdep_getcontext_trace UNW_ARCH_OBJ(getcontext_trace) #define tdep_init_done UNW_OBJ(init_done) #define tdep_init_mem_validate UNW_OBJ(init_mem_validate) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_uc_addr UNW_OBJ(uc_addr) #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,frame) do {} while(0) #define tdep_stash_frame UNW_OBJ(tdep_stash_frame) #define tdep_trace UNW_OBJ(tdep_trace) #ifdef UNW_LOCAL_ONLY # define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else # define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) #define tdep_big_endian(as) ((as)->big_endian) extern atomic_bool tdep_init_done; extern void tdep_init (void); extern void tdep_init_mem_validate (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); extern int tdep_trace (unw_cursor_t *cursor, void **addresses, int *n); extern void tdep_stash_frame (struct dwarf_cursor *c, struct dwarf_reg_state *rs); extern int tdep_getcontext_trace (unw_tdep_context_t *); #endif /* AARCH64_LIBUNWIND_I_H */
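For orientation, here is a minimal consumer sketch of the public libunwind API that the local-only fast paths above serve; it is not part of this header. print_backtrace is an invented name, and the printf cast assumes an LP64 target where unw_word_t fits in an unsigned long.

#define UNW_LOCAL_ONLY          // selects the UNW_LOCAL_ONLY branches above
#include <libunwind.h>
#include <cstdio>

// Walk the current thread's stack. With UNW_LOCAL_ONLY defined, dwarf_loc_t
// values are plain addresses, so dwarf_get/dwarf_put reduce to the direct
// loads and stores shown in the header.
static void print_backtrace(void)
{
  unw_context_t context;
  unw_cursor_t cursor;
  unw_getcontext(&context);           // snapshot this thread's registers
  unw_init_local(&cursor, &context);  // bind a cursor to the local address space
  while (unw_step(&cursor) > 0) {     // step outward one frame at a time
    unw_word_t ip = 0, sp = 0;
    unw_get_reg(&cursor, UNW_REG_IP, &ip);
    unw_get_reg(&cursor, UNW_REG_SP, &sp);
    std::printf("ip=%#lx sp=%#lx\n", (unsigned long)ip, (unsigned long)sp);
  }
}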
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/native/public/mono/metadata/object.h
/** * \file */ #ifndef _MONO_CLI_OBJECT_H_ #define _MONO_CLI_OBJECT_H_ #include <mono/metadata/details/object-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/object-functions.h> #undef MONO_API_FUNCTION #define MONO_OBJECT_SETREF(obj,fieldname,value) do { \ mono_gc_wbarrier_set_field ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \ /*(obj)->fieldname = (value);*/ \ } while (0) /* This should be used if 's' can reside on the heap */ #define MONO_STRUCT_SETREF(s,field,value) do { \ mono_gc_wbarrier_generic_store (&((s)->field), (MonoObject*)(value)); \ } while (0) #define mono_array_addr(array,type,index) ((type*)mono_array_addr_with_size ((array), sizeof (type), (index))) #define mono_array_get(array,type,index) ( *(type*)mono_array_addr ((array), type, (index)) ) #define mono_array_set(array,type,index,value) \ do { \ type *__p = (type *) mono_array_addr ((array), type, (index)); \ *__p = (value); \ } while (0) #define mono_array_setref(array,index,value) \ do { \ void **__p = (void **) mono_array_addr ((array), void*, (index)); \ mono_gc_wbarrier_set_arrayref ((array), __p, (MonoObject*)(value)); \ /* *__p = (value);*/ \ } while (0) #define mono_array_memcpy_refs(dest,destidx,src,srcidx,count) \ do { \ void **__p = (void **) mono_array_addr ((dest), void*, (destidx)); \ void **__s = mono_array_addr ((src), void*, (srcidx)); \ mono_gc_wbarrier_arrayref_copy (__p, __s, (count)); \ } while (0) MONO_END_DECLS #endif
/** * \file */ #ifndef _MONO_CLI_OBJECT_H_ #define _MONO_CLI_OBJECT_H_ #include <mono/metadata/details/object-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/object-functions.h> #undef MONO_API_FUNCTION #define MONO_OBJECT_SETREF(obj,fieldname,value) do { \ mono_gc_wbarrier_set_field ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \ /*(obj)->fieldname = (value);*/ \ } while (0) /* This should be used if 's' can reside on the heap */ #define MONO_STRUCT_SETREF(s,field,value) do { \ mono_gc_wbarrier_generic_store (&((s)->field), (MonoObject*)(value)); \ } while (0) #define mono_array_addr(array,type,index) ((type*)mono_array_addr_with_size ((array), sizeof (type), (index))) #define mono_array_get(array,type,index) ( *(type*)mono_array_addr ((array), type, (index)) ) #define mono_array_set(array,type,index,value) \ do { \ type *__p = (type *) mono_array_addr ((array), type, (index)); \ *__p = (value); \ } while (0) #define mono_array_setref(array,index,value) \ do { \ void **__p = (void **) mono_array_addr ((array), void*, (index)); \ mono_gc_wbarrier_set_arrayref ((array), __p, (MonoObject*)(value)); \ /* *__p = (value);*/ \ } while (0) #define mono_array_memcpy_refs(dest,destidx,src,srcidx,count) \ do { \ void **__p = (void **) mono_array_addr ((dest), void*, (destidx)); \ void **__s = mono_array_addr ((src), void*, (srcidx)); \ mono_gc_wbarrier_arrayref_copy (__p, __s, (count)); \ } while (0) MONO_END_DECLS #endif
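A short usage sketch of the accessor macros above may help; it is a hedged illustration, not runtime code. fill_arrays is an invented name, the three arrays are assumed to have been created elsewhere (for example with mono_array_new), len is assumed to be at least 1, and gint32 is the glib typedef the mono headers pull in.

#include <mono/metadata/object.h>

// Demonstrates the barrier rules encoded in the macros: plain stores for
// value types, write-barrier variants for object references.
static void fill_arrays (MonoArray *int_arr, MonoArray *src, MonoArray *dst, int len)
{
    for (int i = 0; i < len; ++i)
        mono_array_set (int_arr, gint32, i, i * 2);   /* value type: no barrier */

    /* reference stores must go through the write-barrier variants */
    MonoObject *obj = mono_array_get (src, MonoObject *, 0);
    mono_array_setref (dst, 0, obj);

    if (len > 1)   /* bulk reference copy with a single barrier call */
        mono_array_memcpy_refs (dst, 1, src, 1, len - 1);
}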
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/nativeaot/Runtime/gcheaputilities.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _GCHEAPUTILITIES_H_ #define _GCHEAPUTILITIES_H_ #include "gcinterface.h" #include "daccess.h" // The singular heap instance. GPTR_DECL(IGCHeap, g_pGCHeap); #ifndef DACCESS_COMPILE extern "C" { #endif // !DACCESS_COMPILE GPTR_DECL(uint8_t,g_lowest_address); GPTR_DECL(uint8_t,g_highest_address); GPTR_DECL(uint32_t,g_card_table); GVAL_DECL(GCHeapType, g_heap_type); #ifndef DACCESS_COMPILE } #endif // !DACCESS_COMPILE #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES extern "C" uint32_t* g_card_bundle_table; #endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES extern "C" uint8_t* g_ephemeral_low; extern "C" uint8_t* g_ephemeral_high; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP extern "C" bool g_sw_ww_enabled_for_gc_heap; extern "C" uint8_t* g_write_watch_table; #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // g_gc_dac_vars is a structure of pointers to GC globals that the // DAC uses. It is not exposed directly to the DAC. extern GcDacVars g_gc_dac_vars; // Instead of exposing g_gc_dac_vars to the DAC, a pointer to it // is exposed here (g_gcDacGlobals). The reason for this is to avoid // a problem in which a debugger attaches to a program while the program // is in the middle of initializing the GC DAC vars - if the "publishing" // of DAC vars isn't atomic, the debugger could see a partially initialized // GcDacVars structure. // // Instead, the debuggee "publishes" GcDacVars by assigning a pointer to g_gc_dac_vars // to this global, and the DAC will read this global. typedef DPTR(GcDacVars) PTR_GcDacVars; GPTR_DECL(GcDacVars, g_gcDacGlobals); // GCHeapUtilities provides a number of static methods // that operate on the global heap instance. It can't be // instantiated. class GCHeapUtilities { public: // Retrieves the GC heap. inline static IGCHeap* GetGCHeap() { assert(g_pGCHeap != nullptr); return g_pGCHeap; } // Returns true if the heap has been initialized, false otherwise. inline static bool IsGCHeapInitialized() { return g_pGCHeap != nullptr; } // Returns true if the heap is initialized and a garbage collection // is in progress, false otherwise. inline static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE) { return GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart); } // Returns true if the held GC heap is a Server GC heap, false otherwise. inline static bool IsServerHeap() { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_SVR_GC _ASSERTE(g_heap_type != GC_HEAP_INVALID); return (g_heap_type == GC_HEAP_SVR); #else return false; #endif // FEATURE_SVR_GC } #ifndef DACCESS_COMPILE // Initializes a non-standalone GC. static HRESULT InitializeDefaultGC(); // Records a change in eventing state. This ultimately will inform the GC that it needs to be aware // of new events being enabled. static void RecordEventStateChange(bool isPublicProvider, GCEventKeyword keywords, GCEventLevel level); #endif // DACCESS_COMPILE private: // This class should never be instantiated. GCHeapUtilities() = delete; }; #endif // _GCHEAPUTILITIES_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _GCHEAPUTILITIES_H_ #define _GCHEAPUTILITIES_H_ #include "gcinterface.h" #include "daccess.h" // The singular heap instance. GPTR_DECL(IGCHeap, g_pGCHeap); #ifndef DACCESS_COMPILE extern "C" { #endif // !DACCESS_COMPILE GPTR_DECL(uint8_t,g_lowest_address); GPTR_DECL(uint8_t,g_highest_address); GPTR_DECL(uint32_t,g_card_table); GVAL_DECL(GCHeapType, g_heap_type); #ifndef DACCESS_COMPILE } #endif // !DACCESS_COMPILE #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES extern "C" uint32_t* g_card_bundle_table; #endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES extern "C" uint8_t* g_ephemeral_low; extern "C" uint8_t* g_ephemeral_high; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP extern "C" bool g_sw_ww_enabled_for_gc_heap; extern "C" uint8_t* g_write_watch_table; #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // g_gc_dac_vars is a structure of pointers to GC globals that the // DAC uses. It is not exposed directly to the DAC. extern GcDacVars g_gc_dac_vars; // Instead of exposing g_gc_dac_vars to the DAC, a pointer to it // is exposed here (g_gcDacGlobals). The reason for this is to avoid // a problem in which a debugger attaches to a program while the program // is in the middle of initializing the GC DAC vars - if the "publishing" // of DAC vars isn't atomic, the debugger could see a partially initialized // GcDacVars structure. // // Instead, the debuggee "publishes" GcDacVars by assigning a pointer to g_gc_dac_vars // to this global, and the DAC will read this global. typedef DPTR(GcDacVars) PTR_GcDacVars; GPTR_DECL(GcDacVars, g_gcDacGlobals); // GCHeapUtilities provides a number of static methods // that operate on the global heap instance. It can't be // instantiated. class GCHeapUtilities { public: // Retrieves the GC heap. inline static IGCHeap* GetGCHeap() { assert(g_pGCHeap != nullptr); return g_pGCHeap; } // Returns true if the heap has been initialized, false otherwise. inline static bool IsGCHeapInitialized() { return g_pGCHeap != nullptr; } // Returns true if the heap is initialized and a garbage collection // is in progress, false otherwise. inline static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE) { return GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart); } // Returns true if the held GC heap is a Server GC heap, false otherwise. inline static bool IsServerHeap() { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_SVR_GC _ASSERTE(g_heap_type != GC_HEAP_INVALID); return (g_heap_type == GC_HEAP_SVR); #else return false; #endif // FEATURE_SVR_GC } #ifndef DACCESS_COMPILE // Initializes a non-standalone GC. static HRESULT InitializeDefaultGC(); // Records a change in eventing state. This ultimately will inform the GC that it needs to be aware // of new events being enabled. static void RecordEventStateChange(bool isPublicProvider, GCEventKeyword keywords, GCEventLevel level); #endif // DACCESS_COMPILE private: // This class should never be instantiated. GCHeapUtilities() = delete; }; #endif // _GCHEAPUTILITIES_H_
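As a rough illustration of how these guards compose, here is a hedged caller sketch; LogHeapFlavor is an invented name and the reporting is left abstract. The IsGCHeapInitialized check matters during early startup, before g_pGCHeap has been published.

#include "gcheaputilities.h"

// Query the global heap instance defensively.
void LogHeapFlavor()
{
    if (!GCHeapUtilities::IsGCHeapInitialized())
        return;                                       // GC not brought up yet

    IGCHeap* heap = GCHeapUtilities::GetGCHeap();     // asserts non-null internally
    bool server   = GCHeapUtilities::IsServerHeap();  // GC_HEAP_SVR vs. workstation
    BOOL inGC     = GCHeapUtilities::IsGCInProgress();
    // ... report 'server' and 'inGC' through whatever logging channel applies ...
    (void)heap; (void)server; (void)inGC;
}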
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/minipal/minipal.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include <stddef.h> // Interface between the runtime and platform specific functionality class VMToOSInterface { private: ~VMToOSInterface() {} public: // Create double mapped memory mapper // Parameters: // pHandle - receives handle of the double mapped memory mapper // pMaxExecutableCodeSize - receives the maximum executable memory size it can map // Return: // true if it succeeded, false if it failed static bool CreateDoubleMemoryMapper(void **pHandle, size_t *pMaxExecutableCodeSize); // Destroy the double mapped memory mapper represented by the passed in handle // Parameters: // mapperHandle - handle of the double mapped memory mapper to destroy static void DestroyDoubleMemoryMapper(void *mapperHandle); // Reserve a block of memory that can be double mapped. // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // offset - offset in the underlying shared memory // size - size of the block to reserve // rangeStart // rangeEnd - Requests reserving virtual memory in the specified range. // Setting both rangeStart and rangeEnd to 0 means that the // requested range is not limited. // When a specific range is requested, it is obligatory. // Return: // starting virtual address of the reserved memory or NULL if it failed static void* ReserveDoubleMappedMemory(void *mapperHandle, size_t offset, size_t size, const void *rangeStart, const void* rangeEnd); // Commit a block of memory in the range previously reserved by the ReserveDoubleMappedMemory // Parameters: // pStart - start address of the virtual address range to commit // size - size of the memory block to commit // isExecutable - true means that the mapping should be RX, false means RW // Return: // Committed range start static void* CommitDoubleMappedMemory(void* pStart, size_t size, bool isExecutable); // Release a block of virtual memory previously committed by the CommitDoubleMappedMemory // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // pStart - start address of the virtual address range to release. It must be one // that was previously returned by the CommitDoubleMappedMemory // offset - offset in the underlying shared memory // size - size of the memory block to release // Return: // true if it succeeded, false if it failed static bool ReleaseDoubleMappedMemory(void *mapperHandle, void* pStart, size_t offset, size_t size); // Get a RW mapping for the RX block specified by the arguments // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // pStart - start address of the RX virtual address range. // offset - offset in the underlying shared memory // size - size of the memory block to map as RW // Return: // Starting virtual address of the RW mapping. static void* GetRWMapping(void *mapperHandle, void* pStart, size_t offset, size_t size); // Release RW mapping of the block specified by the arguments // Parameters: // pStart - Start address of the RW virtual address range. It must be an address // previously returned by the GetRWMapping. // size - Size of the memory block to release. It must be the size previously // passed to the GetRWMapping that returned the pStart. // Return: // true if it succeeded, false if it failed static bool ReleaseRWMapping(void* pStart, size_t size); };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include <stddef.h> // Interface between the runtime and platform specific functionality class VMToOSInterface { private: ~VMToOSInterface() {} public: // Create double mapped memory mapper // Parameters: // pHandle - receives handle of the double mapped memory mapper // pMaxExecutableCodeSize - receives the maximum executable memory size it can map // Return: // true if it succeeded, false if it failed static bool CreateDoubleMemoryMapper(void **pHandle, size_t *pMaxExecutableCodeSize); // Destroy the double mapped memory mapper represented by the passed in handle // Parameters: // mapperHandle - handle of the double mapped memory mapper to destroy static void DestroyDoubleMemoryMapper(void *mapperHandle); // Reserve a block of memory that can be double mapped. // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // offset - offset in the underlying shared memory // size - size of the block to reserve // rangeStart // rangeEnd - Requests reserving virtual memory in the specified range. // Setting both rangeStart and rangeEnd to 0 means that the // requested range is not limited. // When a specific range is requested, it is obligatory. // Return: // starting virtual address of the reserved memory or NULL if it failed static void* ReserveDoubleMappedMemory(void *mapperHandle, size_t offset, size_t size, const void *rangeStart, const void* rangeEnd); // Commit a block of memory in the range previously reserved by the ReserveDoubleMappedMemory // Parameters: // pStart - start address of the virtual address range to commit // size - size of the memory block to commit // isExecutable - true means that the mapping should be RX, false means RW // Return: // Committed range start static void* CommitDoubleMappedMemory(void* pStart, size_t size, bool isExecutable); // Release a block of virtual memory previously committed by the CommitDoubleMappedMemory // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // pStart - start address of the virtual address range to release. It must be one // that was previously returned by the CommitDoubleMappedMemory // offset - offset in the underlying shared memory // size - size of the memory block to release // Return: // true if it succeeded, false if it failed static bool ReleaseDoubleMappedMemory(void *mapperHandle, void* pStart, size_t offset, size_t size); // Get a RW mapping for the RX block specified by the arguments // Parameters: // mapperHandle - handle of the double mapped memory mapper to use // pStart - start address of the RX virtual address range. // offset - offset in the underlying shared memory // size - size of the memory block to map as RW // Return: // Starting virtual address of the RW mapping. static void* GetRWMapping(void *mapperHandle, void* pStart, size_t offset, size_t size); // Release RW mapping of the block specified by the arguments // Parameters: // pStart - Start address of the RW virtual address range. It must be an address // previously returned by the GetRWMapping. // size - Size of the memory block to release. It must be the size previously // passed to the GetRWMapping that returned the pStart. // Return: // true if it succeeded, false if it failed static bool ReleaseRWMapping(void* pStart, size_t size); };
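To make the intended call order concrete, here is a hedged lifecycle sketch of the interface above. EmitCode, codeBytes and codeSize are invented names; in a real JIT the RX block would outlive this function, and the release/destroy calls are shown only to complete the lifecycle.

#include "minipal.h"
#include <cstring>

// Reserve RX space, commit it, take a temporary RW alias, copy code bytes
// in through the alias, then drop the alias while the RX view stays valid.
bool EmitCode(const void* codeBytes, size_t codeSize)
{
    void*  mapper  = nullptr;
    size_t maxSize = 0;
    if (!VMToOSInterface::CreateDoubleMemoryMapper(&mapper, &maxSize) || codeSize > maxSize)
        return false;

    size_t offset = 0;  // offset into the underlying shared memory
    void* rx = VMToOSInterface::ReserveDoubleMappedMemory(mapper, offset, codeSize,
                                                          nullptr, nullptr); // range unrestricted
    if (rx == nullptr)
        return false;
    VMToOSInterface::CommitDoubleMappedMemory(rx, codeSize, /* isExecutable */ true);

    void* rw = VMToOSInterface::GetRWMapping(mapper, rx, offset, codeSize);
    if (rw == nullptr)
        return false;
    memcpy(rw, codeBytes, codeSize);                  // write through the RW alias
    VMToOSInterface::ReleaseRWMapping(rw, codeSize);  // RX view remains executable

    // Shown only to complete the sketch; real code would run 'rx' first.
    VMToOSInterface::ReleaseDoubleMappedMemory(mapper, rx, offset, codeSize);
    VMToOSInterface::DestroyDoubleMemoryMapper(mapper);
    return true;
}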
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/vm/nativeoverlapped.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMNativeOverlapped.h ** ** Purpose: Native methods for allocating and freeing NativeOverlapped ** ** ===========================================================*/ #include "common.h" #include "fcall.h" #include "nativeoverlapped.h" #include "corhost.h" #include "win32threadpool.h" #include "comsynchronizable.h" #include "comthreadpool.h" #include "marshalnative.h" // //The function is called from managed code to quickly check if a packet is available. //This is a perf-critical function. Even helper method frames are not created. We fall //back to the VM to do heavy weight operations like creating a new CP thread. // FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, DWORD* numBytes) { FCALL_CONTRACT; #ifndef TARGET_UNIX Thread *pThread = GetThread(); size_t key=0; //Poll and wait if GC is in progress, to avoid blocking GC for too long. FC_GC_POLL(); *lpOverlapped = ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(pThread, errorCode, numBytes, &key); if(*lpOverlapped == NULL) { return; } OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(*lpOverlapped)); if (overlapped->m_callback == NULL) { //We're not initialized yet, go back to the VM, and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; return; } else { if(!pThread->IsRealThreadPoolResetNeeded()) { pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL); pThread->InternalReset(TRUE, FALSE, FALSE); if(ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadpoolMgr::CPThreadCounter.DangerousGetDirtyCounts())) { //We may have to create a CP thread, go back to the VM, and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; } } else { //A more complete reset is needed (due to change in priority etc), go back to the VM, //and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; } } // if this will be "dispatched" to the managed callback fire the IODequeue event: if (*lpOverlapped != NULL && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadPoolIODequeue)) FireEtwThreadPoolIODequeue(*lpOverlapped, OverlappedDataObject::GetOverlapped(*lpOverlapped), GetClrInstanceId()); #else // !TARGET_UNIX *lpOverlapped = NULL; #endif // !TARGET_UNIX return; } FCIMPLEND FCIMPL1(LPOVERLAPPED, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE) { FCALL_CONTRACT; LPOVERLAPPED lpOverlapped; OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(overlappedUNSAFE); OBJECTREF userObject = overlapped->m_userObject; HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, overlapped, userObject); if (g_pOverlappedDataClass == NULL) { g_pOverlappedDataClass = CoreLibBinder::GetClass(CLASS__OVERLAPPEDDATA); // We have an optimization to avoid creating an event if IO is in the default domain. This depends on the default domain // never being unloaded. 
} CONSISTENCY_CHECK(overlapped->GetMethodTable() == g_pOverlappedDataClass); if (userObject != NULL) { if (userObject->GetMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT].AsMethodTable()) { BASEARRAYREF asArray = (BASEARRAYREF) userObject; OBJECTREF *pObj = (OBJECTREF*)(asArray->GetDataPtr()); SIZE_T num = asArray->GetNumComponents(); SIZE_T i; for (i = 0; i < num; i ++) { ValidatePinnedObject(pObj[i]); } } else { ValidatePinnedObject(userObject); } } NewHolder<NATIVEOVERLAPPED_AND_HANDLE> overlappedHolder(new NATIVEOVERLAPPED_AND_HANDLE()); overlappedHolder->m_handle = GetAppDomain()->CreateTypedHandle(overlapped, HNDTYPE_ASYNCPINNED); lpOverlapped = &(overlappedHolder.Extract()->m_overlapped); lpOverlapped->Internal = 0; lpOverlapped->InternalHigh = 0; lpOverlapped->Offset = overlapped->m_offsetLow; lpOverlapped->OffsetHigh = overlapped->m_offsetHigh; lpOverlapped->hEvent = (HANDLE)overlapped->m_eventHandle; overlapped->m_pNativeOverlapped = lpOverlapped; HELPER_METHOD_FRAME_END(); LOG((LF_INTEROP, LL_INFO10000, "In AllocNativeOverlapped thread 0x%x\n", GetThread())); if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadPoolIODequeue)) FireEtwThreadPoolIOPack(lpOverlapped, overlappedUNSAFE, GetClrInstanceId()); return lpOverlapped; } FCIMPLEND FCIMPL1(void, FreeNativeOverlapped, LPOVERLAPPED lpOverlapped) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass)); DestroyAsyncPinningHandle(((NATIVEOVERLAPPED_AND_HANDLE*)lpOverlapped)->m_handle); delete lpOverlapped; HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlapped) { FCALL_CONTRACT; CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass)); return OverlappedDataObject::GetOverlapped(lpOverlapped); } FCIMPLEND
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMNativeOverlapped.h ** ** Purpose: Native methods for allocating and freeing NativeOverlapped ** ** ===========================================================*/ #include "common.h" #include "fcall.h" #include "nativeoverlapped.h" #include "corhost.h" #include "win32threadpool.h" #include "comsynchronizable.h" #include "comthreadpool.h" #include "marshalnative.h" // //The function is called from managed code to quickly check if a packet is available. //This is a perf-critical function. Even helper method frames are not created. We fall //back to the VM to do heavy weight operations like creating a new CP thread. // FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, DWORD* numBytes) { FCALL_CONTRACT; #ifndef TARGET_UNIX Thread *pThread = GetThread(); size_t key=0; //Poll and wait if GC is in progress, to avoid blocking GC for too long. FC_GC_POLL(); *lpOverlapped = ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(pThread, errorCode, numBytes, &key); if(*lpOverlapped == NULL) { return; } OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(*lpOverlapped)); if (overlapped->m_callback == NULL) { //We're not initialized yet, go back to the VM, and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; return; } else { if(!pThread->IsRealThreadPoolResetNeeded()) { pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL); pThread->InternalReset(TRUE, FALSE, FALSE); if(ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadpoolMgr::CPThreadCounter.DangerousGetDirtyCounts())) { //We may have to create a CP thread, go back to the VM, and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; } } else { //A more complete reset is needed (due to change in priority etc), go back to the VM, //and process the packet there. ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped); *lpOverlapped = NULL; } } // if this will be "dispatched" to the managed callback fire the IODequeue event: if (*lpOverlapped != NULL && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadPoolIODequeue)) FireEtwThreadPoolIODequeue(*lpOverlapped, OverlappedDataObject::GetOverlapped(*lpOverlapped), GetClrInstanceId()); #else // !TARGET_UNIX *lpOverlapped = NULL; #endif // !TARGET_UNIX return; } FCIMPLEND FCIMPL1(LPOVERLAPPED, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE) { FCALL_CONTRACT; LPOVERLAPPED lpOverlapped; OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(overlappedUNSAFE); OBJECTREF userObject = overlapped->m_userObject; HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, overlapped, userObject); if (g_pOverlappedDataClass == NULL) { g_pOverlappedDataClass = CoreLibBinder::GetClass(CLASS__OVERLAPPEDDATA); // We have an optimization to avoid creating an event if IO is in the default domain. This depends on the default domain // never being unloaded. 
} CONSISTENCY_CHECK(overlapped->GetMethodTable() == g_pOverlappedDataClass); if (userObject != NULL) { if (userObject->GetMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT].AsMethodTable()) { BASEARRAYREF asArray = (BASEARRAYREF) userObject; OBJECTREF *pObj = (OBJECTREF*)(asArray->GetDataPtr()); SIZE_T num = asArray->GetNumComponents(); SIZE_T i; for (i = 0; i < num; i ++) { ValidatePinnedObject(pObj[i]); } } else { ValidatePinnedObject(userObject); } } NewHolder<NATIVEOVERLAPPED_AND_HANDLE> overlappedHolder(new NATIVEOVERLAPPED_AND_HANDLE()); overlappedHolder->m_handle = GetAppDomain()->CreateTypedHandle(overlapped, HNDTYPE_ASYNCPINNED); lpOverlapped = &(overlappedHolder.Extract()->m_overlapped); lpOverlapped->Internal = 0; lpOverlapped->InternalHigh = 0; lpOverlapped->Offset = overlapped->m_offsetLow; lpOverlapped->OffsetHigh = overlapped->m_offsetHigh; lpOverlapped->hEvent = (HANDLE)overlapped->m_eventHandle; overlapped->m_pNativeOverlapped = lpOverlapped; HELPER_METHOD_FRAME_END(); LOG((LF_INTEROP, LL_INFO10000, "In AllocNativeOverlapped thread 0x%x\n", GetThread())); if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadPoolIODequeue)) FireEtwThreadPoolIOPack(lpOverlapped, overlappedUNSAFE, GetClrInstanceId()); return lpOverlapped; } FCIMPLEND FCIMPL1(void, FreeNativeOverlapped, LPOVERLAPPED lpOverlapped) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass)); DestroyAsyncPinningHandle(((NATIVEOVERLAPPED_AND_HANDLE*)lpOverlapped)->m_handle); delete lpOverlapped; HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlapped) { FCALL_CONTRACT; CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass)); return OverlappedDataObject::GetOverlapped(lpOverlapped); } FCIMPLEND
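The FreeNativeOverlapped cast above works because the OVERLAPPED handed to the OS is the first member of NATIVEOVERLAPPED_AND_HANDLE, so the wrapper can be recovered from the member pointer. Here is a standalone sketch of just that layout trick; FakeOverlapped and OverlappedAndHandle are invented stand-ins, not runtime code.

#include <cstdio>

struct FakeOverlapped { unsigned long internal; };

struct OverlappedAndHandle {
    FakeOverlapped m_overlapped;  // must remain the first member
    void*          m_handle;      // extra bookkeeping (a GC handle in the VM)
};

int main()
{
    OverlappedAndHandle* wrapper = new OverlappedAndHandle{};
    wrapper->m_handle = &wrapper->m_handle;                 // any token value

    FakeOverlapped* lpOverlapped = &wrapper->m_overlapped;  // what the OS sees

    // FreeNativeOverlapped-style recovery: member pointer -> wrapper pointer.
    OverlappedAndHandle* back = reinterpret_cast<OverlappedAndHandle*>(lpOverlapped);
    std::printf("recovered handle slot: %p\n", back->m_handle);
    delete back;
    return 0;
}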
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/vm/perfinfo.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: perfinfo.h // #ifndef PERFINFO_H #define PERFINFO_H #include "sstring.h" #include "fstream.h" /* A perfinfo-%d.map is created for every process that is created with managed code, the %d being replaced with the process ID. Every line in the perfinfo-%d.map is a type and value, separated by the sDelimiter character: type;value type represents what the user might want to do with its given value. value has a format chosen by the user for parsing later on. */ class PerfInfo { public: PerfInfo(int pid); ~PerfInfo(); void LogImage(PEAssembly* pPEAssembly, WCHAR* guid); private: CFileStream* m_Stream; const char sDelimiter = ';'; void OpenFile(SString& path); void WriteLine(SString& type, SString& value); }; #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: perfinfo.h // #ifndef PERFINFO_H #define PERFINFO_H #include "sstring.h" #include "fstream.h" /* A perfinfo-%d.map is created for every process that is created with managed code, the %d being replaced with the process ID. Every line in the perfinfo-%d.map is a type and value, separated by the sDelimiter character: type;value type represents what the user might want to do with its given value. value has a format chosen by the user for parsing later on. */ class PerfInfo { public: PerfInfo(int pid); ~PerfInfo(); void LogImage(PEAssembly* pPEAssembly, WCHAR* guid); private: CFileStream* m_Stream; const char sDelimiter = ';'; void OpenFile(SString& path); void WriteLine(SString& type, SString& value); }; #endif
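A minimal sketch of the type;value record format the comment describes, assuming the file really is named perfinfo-&lt;pid&gt;.map; the "ImageLoad" record type, the value layout, and write_perfinfo_line are invented for illustration and only mirror what PerfInfo::WriteLine is described to do.

#include <cstdio>

// Writes one "type;value" record, with ';' as the delimiter.
static void write_perfinfo_line(std::FILE* f, const char* type, const char* value)
{
    std::fprintf(f, "%s%c%s\n", type, ';', value);
}

int main()
{
    int pid = 1234;  // placeholder; the runtime uses its own process id
    char path[64];
    std::snprintf(path, sizeof(path), "perfinfo-%d.map", pid);
    std::FILE* f = std::fopen(path, "w");
    if (f == nullptr)
        return 1;
    write_perfinfo_line(f, "ImageLoad", "/path/to/app.dll{guid}");
    std::fclose(f);
    return 0;
}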
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/mono/mono/utils/mono-coop-semaphore.h
/** * \file */ #ifndef __MONO_COOP_SEMAPHORE_H__ #define __MONO_COOP_SEMAPHORE_H__ #include <config.h> #include <glib.h> #include "mono-os-semaphore.h" #include "mono-threads-api.h" /* We put the OS sync primitives in a struct, so the compiler will warn us if * we use mono_os_(mutex|cond|sem)_... on MonoCoop(Mutex|Cond|Sem) structures */ typedef struct _MonoCoopSem MonoCoopSem; struct _MonoCoopSem { MonoSemType s; }; static inline void mono_coop_sem_init (MonoCoopSem *sem, int value) { mono_os_sem_init (&sem->s, value); } static inline void mono_coop_sem_destroy (MonoCoopSem *sem) { mono_os_sem_destroy (&sem->s); } static inline gint mono_coop_sem_wait (MonoCoopSem *sem, MonoSemFlags flags) { gint res; MONO_ENTER_GC_SAFE; res = mono_os_sem_wait (&sem->s, flags); MONO_EXIT_GC_SAFE; return res; } static inline MonoSemTimedwaitRet mono_coop_sem_timedwait (MonoCoopSem *sem, guint timeout_ms, MonoSemFlags flags) { MonoSemTimedwaitRet res; MONO_ENTER_GC_SAFE; res = mono_os_sem_timedwait (&sem->s, timeout_ms, flags); MONO_EXIT_GC_SAFE; return res; } static inline void mono_coop_sem_post (MonoCoopSem *sem) { mono_os_sem_post (&sem->s); } #endif /* __MONO_COOP_SEMAPHORE_H__ */
/** * \file */ #ifndef __MONO_COOP_SEMAPHORE_H__ #define __MONO_COOP_SEMAPHORE_H__ #include <config.h> #include <glib.h> #include "mono-os-semaphore.h" #include "mono-threads-api.h" /* We put the OS sync primitives in a struct, so the compiler will warn us if * we use mono_os_(mutex|cond|sem)_... on MonoCoop(Mutex|Cond|Sem) structures */ typedef struct _MonoCoopSem MonoCoopSem; struct _MonoCoopSem { MonoSemType s; }; static inline void mono_coop_sem_init (MonoCoopSem *sem, int value) { mono_os_sem_init (&sem->s, value); } static inline void mono_coop_sem_destroy (MonoCoopSem *sem) { mono_os_sem_destroy (&sem->s); } static inline gint mono_coop_sem_wait (MonoCoopSem *sem, MonoSemFlags flags) { gint res; MONO_ENTER_GC_SAFE; res = mono_os_sem_wait (&sem->s, flags); MONO_EXIT_GC_SAFE; return res; } static inline MonoSemTimedwaitRet mono_coop_sem_timedwait (MonoCoopSem *sem, guint timeout_ms, MonoSemFlags flags) { MonoSemTimedwaitRet res; MONO_ENTER_GC_SAFE; res = mono_os_sem_timedwait (&sem->s, timeout_ms, flags); MONO_EXIT_GC_SAFE; return res; } static inline void mono_coop_sem_post (MonoCoopSem *sem) { mono_os_sem_post (&sem->s); } #endif /* __MONO_COOP_SEMAPHORE_H__ */
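A hedged producer/consumer sketch of these wrappers follows. MONO_SEM_FLAGS_NONE is assumed to be the no-op flag defined in mono-os-semaphore.h, and a zero return from mono_coop_sem_wait is assumed to mean a successful wait; producer and consumer are invented names.

#include "mono-coop-semaphore.h"

/* The waits enter a GC-safe region internally, so a suspending GC is never
 * blocked behind a thread parked on the semaphore. */
static MonoCoopSem work_ready;

static void producer (void)
{
    /* ... publish an item to a queue ... */
    mono_coop_sem_post (&work_ready);
}

static void consumer (void)
{
    mono_coop_sem_init (&work_ready, 0);

    if (mono_coop_sem_wait (&work_ready, MONO_SEM_FLAGS_NONE) == 0) {
        /* ... dequeue and process the item ... */
    }

    mono_coop_sem_destroy (&work_ready);
}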
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/nativeaot/libunwind/src/Unwind_AppleExtras.cpp
//===--------------------- Unwind_AppleExtras.cpp -------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // //===----------------------------------------------------------------------===// #include "config.h" #include "AddressSpace.hpp" #include "DwarfParser.hpp" // private keymgr stuff #define KEYMGR_GCC3_DW2_OBJ_LIST 302 extern "C" { extern void _keymgr_set_and_unlock_processwide_ptr(int key, void *ptr); extern void *_keymgr_get_and_lock_processwide_ptr(int key); } // undocumented libgcc "struct object" struct libgcc_object { void *start; void *unused1; void *unused2; void *fde; unsigned long encoding; void *fde_end; libgcc_object *next; }; // undocumented libgcc "struct km_object_info" referenced by // KEYMGR_GCC3_DW2_OBJ_LIST struct libgcc_object_info { libgcc_object *seen_objects; libgcc_object *unseen_objects; unsigned spare[2]; }; // static linker symbols to prevent wrong two level namespace for _Unwind symbols #if defined(__arm__) #define NOT_HERE_BEFORE_5_0(sym) \ extern const char sym##_tmp30 __asm("$ld$hide$os3.0$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp30 = 0; \ extern const char sym##_tmp31 __asm("$ld$hide$os3.1$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp31 = 0; \ extern const char sym##_tmp32 __asm("$ld$hide$os3.2$_" #sym );\ __attribute__((visibility("default"))) const char sym##_tmp32 = 0; \ extern const char sym##_tmp40 __asm("$ld$hide$os4.0$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp40 = 0; \ extern const char sym##_tmp41 __asm("$ld$hide$os4.1$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp41 = 0; \ extern const char sym##_tmp42 __asm("$ld$hide$os4.2$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp42 = 0; \ extern const char sym##_tmp43 __asm("$ld$hide$os4.3$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp43 = 0; #elif defined(__arm64__) #define NOT_HERE_BEFORE_10_6(sym) #define NEVER_HERE(sym) #else #define NOT_HERE_BEFORE_10_6(sym) \ extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; #define NEVER_HERE(sym) \ extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; \ extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp6 = 0; #endif #if defined(_LIBUNWIND_BUILD_ZERO_COST_APIS) // // symbols in libSystem.dylib in 10.6 and later, but are in libgcc_s.dylib in // earlier versions // NOT_HERE_BEFORE_10_6(_Unwind_DeleteException) NOT_HERE_BEFORE_10_6(_Unwind_Find_FDE) NOT_HERE_BEFORE_10_6(_Unwind_ForcedUnwind) NOT_HERE_BEFORE_10_6(_Unwind_GetGR) NOT_HERE_BEFORE_10_6(_Unwind_GetIP) NOT_HERE_BEFORE_10_6(_Unwind_GetLanguageSpecificData) NOT_HERE_BEFORE_10_6(_Unwind_GetRegionStart) NOT_HERE_BEFORE_10_6(_Unwind_RaiseException) NOT_HERE_BEFORE_10_6(_Unwind_Resume) NOT_HERE_BEFORE_10_6(_Unwind_SetGR) NOT_HERE_BEFORE_10_6(_Unwind_SetIP) 
NOT_HERE_BEFORE_10_6(_Unwind_Backtrace) NOT_HERE_BEFORE_10_6(_Unwind_FindEnclosingFunction) NOT_HERE_BEFORE_10_6(_Unwind_GetCFA) NOT_HERE_BEFORE_10_6(_Unwind_GetDataRelBase) NOT_HERE_BEFORE_10_6(_Unwind_GetTextRelBase) NOT_HERE_BEFORE_10_6(_Unwind_Resume_or_Rethrow) NOT_HERE_BEFORE_10_6(_Unwind_GetIPInfo) NOT_HERE_BEFORE_10_6(__register_frame) NOT_HERE_BEFORE_10_6(__deregister_frame) // // symbols in libSystem.dylib for compatibility, but we don't want any new code // using them // NEVER_HERE(__register_frame_info_bases) NEVER_HERE(__register_frame_info) NEVER_HERE(__register_frame_info_table_bases) NEVER_HERE(__register_frame_info_table) NEVER_HERE(__register_frame_table) NEVER_HERE(__deregister_frame_info) NEVER_HERE(__deregister_frame_info_bases) #endif // defined(_LIBUNWIND_BUILD_ZERO_COST_APIS) #if defined(_LIBUNWIND_BUILD_SJLJ_APIS) // // symbols in libSystem.dylib in iOS 5.0 and later, but are in libgcc_s.dylib in // earlier versions // NOT_HERE_BEFORE_5_0(_Unwind_GetLanguageSpecificData) NOT_HERE_BEFORE_5_0(_Unwind_GetRegionStart) NOT_HERE_BEFORE_5_0(_Unwind_GetIP) NOT_HERE_BEFORE_5_0(_Unwind_SetGR) NOT_HERE_BEFORE_5_0(_Unwind_SetIP) NOT_HERE_BEFORE_5_0(_Unwind_DeleteException) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Register) NOT_HERE_BEFORE_5_0(_Unwind_GetGR) NOT_HERE_BEFORE_5_0(_Unwind_GetIPInfo) NOT_HERE_BEFORE_5_0(_Unwind_GetCFA) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Resume) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_RaiseException) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Resume_or_Rethrow) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Unregister) #endif // defined(_LIBUNWIND_BUILD_SJLJ_APIS) namespace libunwind { _LIBUNWIND_HIDDEN bool checkKeyMgrRegisteredFDEs(uintptr_t pc, void *&fde) { #if __MAC_OS_X_VERSION_MIN_REQUIRED // lastly check for old style keymgr registration of dynamically generated // FDEs acquire exclusive access to libgcc_object_info libgcc_object_info *head = (libgcc_object_info *) _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST); if (head != NULL) { // look at each FDE in keymgr for (libgcc_object *ob = head->unseen_objects; ob != NULL; ob = ob->next) { CFI_Parser<LocalAddressSpace>::FDE_Info fdeInfo; CFI_Parser<LocalAddressSpace>::CIE_Info cieInfo; const char *msg = CFI_Parser<LocalAddressSpace>::decodeFDE( LocalAddressSpace::sThisAddressSpace, (uintptr_t)ob->fde, &fdeInfo, &cieInfo); if (msg == NULL) { // Check if this FDE is for a function that includes the pc if ((fdeInfo.pcStart <= pc) && (pc < fdeInfo.pcEnd)) { fde = (void*)fdeInfo.pcStart; _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, head); return true; } } } } // release libgcc_object_info _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, head); #else (void)pc; (void)fde; #endif return false; } }
//===--------------------- Unwind_AppleExtras.cpp -------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // //===----------------------------------------------------------------------===// #include "config.h" #include "AddressSpace.hpp" #include "DwarfParser.hpp" // private keymgr stuff #define KEYMGR_GCC3_DW2_OBJ_LIST 302 extern "C" { extern void _keymgr_set_and_unlock_processwide_ptr(int key, void *ptr); extern void *_keymgr_get_and_lock_processwide_ptr(int key); } // undocumented libgcc "struct object" struct libgcc_object { void *start; void *unused1; void *unused2; void *fde; unsigned long encoding; void *fde_end; libgcc_object *next; }; // undocumented libgcc "struct km_object_info" referenced by // KEYMGR_GCC3_DW2_OBJ_LIST struct libgcc_object_info { libgcc_object *seen_objects; libgcc_object *unseen_objects; unsigned spare[2]; }; // static linker symbols to prevent wrong two level namespace for _Unwind symbols #if defined(__arm__) #define NOT_HERE_BEFORE_5_0(sym) \ extern const char sym##_tmp30 __asm("$ld$hide$os3.0$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp30 = 0; \ extern const char sym##_tmp31 __asm("$ld$hide$os3.1$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp31 = 0; \ extern const char sym##_tmp32 __asm("$ld$hide$os3.2$_" #sym );\ __attribute__((visibility("default"))) const char sym##_tmp32 = 0; \ extern const char sym##_tmp40 __asm("$ld$hide$os4.0$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp40 = 0; \ extern const char sym##_tmp41 __asm("$ld$hide$os4.1$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp41 = 0; \ extern const char sym##_tmp42 __asm("$ld$hide$os4.2$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp42 = 0; \ extern const char sym##_tmp43 __asm("$ld$hide$os4.3$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp43 = 0; #elif defined(__arm64__) #define NOT_HERE_BEFORE_10_6(sym) #define NEVER_HERE(sym) #else #define NOT_HERE_BEFORE_10_6(sym) \ extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; #define NEVER_HERE(sym) \ extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; \ extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp6 = 0; #endif #if defined(_LIBUNWIND_BUILD_ZERO_COST_APIS) // // symbols in libSystem.dylib in 10.6 and later, but are in libgcc_s.dylib in // earlier versions // NOT_HERE_BEFORE_10_6(_Unwind_DeleteException) NOT_HERE_BEFORE_10_6(_Unwind_Find_FDE) NOT_HERE_BEFORE_10_6(_Unwind_ForcedUnwind) NOT_HERE_BEFORE_10_6(_Unwind_GetGR) NOT_HERE_BEFORE_10_6(_Unwind_GetIP) NOT_HERE_BEFORE_10_6(_Unwind_GetLanguageSpecificData) NOT_HERE_BEFORE_10_6(_Unwind_GetRegionStart) NOT_HERE_BEFORE_10_6(_Unwind_RaiseException) NOT_HERE_BEFORE_10_6(_Unwind_Resume) NOT_HERE_BEFORE_10_6(_Unwind_SetGR) NOT_HERE_BEFORE_10_6(_Unwind_SetIP) 
NOT_HERE_BEFORE_10_6(_Unwind_Backtrace) NOT_HERE_BEFORE_10_6(_Unwind_FindEnclosingFunction) NOT_HERE_BEFORE_10_6(_Unwind_GetCFA) NOT_HERE_BEFORE_10_6(_Unwind_GetDataRelBase) NOT_HERE_BEFORE_10_6(_Unwind_GetTextRelBase) NOT_HERE_BEFORE_10_6(_Unwind_Resume_or_Rethrow) NOT_HERE_BEFORE_10_6(_Unwind_GetIPInfo) NOT_HERE_BEFORE_10_6(__register_frame) NOT_HERE_BEFORE_10_6(__deregister_frame) // // symbols in libSystem.dylib for compatibility, but we don't want any new code // using them // NEVER_HERE(__register_frame_info_bases) NEVER_HERE(__register_frame_info) NEVER_HERE(__register_frame_info_table_bases) NEVER_HERE(__register_frame_info_table) NEVER_HERE(__register_frame_table) NEVER_HERE(__deregister_frame_info) NEVER_HERE(__deregister_frame_info_bases) #endif // defined(_LIBUNWIND_BUILD_ZERO_COST_APIS) #if defined(_LIBUNWIND_BUILD_SJLJ_APIS) // // symbols in libSystem.dylib in iOS 5.0 and later, but are in libgcc_s.dylib in // earlier versions // NOT_HERE_BEFORE_5_0(_Unwind_GetLanguageSpecificData) NOT_HERE_BEFORE_5_0(_Unwind_GetRegionStart) NOT_HERE_BEFORE_5_0(_Unwind_GetIP) NOT_HERE_BEFORE_5_0(_Unwind_SetGR) NOT_HERE_BEFORE_5_0(_Unwind_SetIP) NOT_HERE_BEFORE_5_0(_Unwind_DeleteException) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Register) NOT_HERE_BEFORE_5_0(_Unwind_GetGR) NOT_HERE_BEFORE_5_0(_Unwind_GetIPInfo) NOT_HERE_BEFORE_5_0(_Unwind_GetCFA) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Resume) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_RaiseException) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Resume_or_Rethrow) NOT_HERE_BEFORE_5_0(_Unwind_SjLj_Unregister) #endif // defined(_LIBUNWIND_BUILD_SJLJ_APIS) namespace libunwind { _LIBUNWIND_HIDDEN bool checkKeyMgrRegisteredFDEs(uintptr_t pc, void *&fde) { #if __MAC_OS_X_VERSION_MIN_REQUIRED // lastly check for old style keymgr registration of dynamically generated // FDEs acquire exclusive access to libgcc_object_info libgcc_object_info *head = (libgcc_object_info *) _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST); if (head != NULL) { // look at each FDE in keymgr for (libgcc_object *ob = head->unseen_objects; ob != NULL; ob = ob->next) { CFI_Parser<LocalAddressSpace>::FDE_Info fdeInfo; CFI_Parser<LocalAddressSpace>::CIE_Info cieInfo; const char *msg = CFI_Parser<LocalAddressSpace>::decodeFDE( LocalAddressSpace::sThisAddressSpace, (uintptr_t)ob->fde, &fdeInfo, &cieInfo); if (msg == NULL) { // Check if this FDE is for a function that includes the pc if ((fdeInfo.pcStart <= pc) && (pc < fdeInfo.pcEnd)) { fde = (void*)fdeInfo.pcStart; _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, head); return true; } } } } // release libgcc_object_info _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, head); #else (void)pc; (void)fde; #endif return false; } }
-1
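Side note on the record above: libunwind's keymgr fallback accepts a dynamically registered FDE only when the lookup pc falls inside the function's half-open [pcStart, pcEnd) range. A minimal standalone sketch of that acceptance test, with hypothetical types and a hypothetical caller rather than anything from the libunwind source:

#include <cstddef>
#include <cstdint>

struct FDEInfo { uintptr_t pcStart, pcEnd; void *fde; };

// Same test as checkKeyMgrRegisteredFDEs: select the FDE whose function
// covers pc, i.e. pcStart <= pc < pcEnd.
static void *findFDEForPC(const FDEInfo *infos, size_t n, uintptr_t pc) {
    for (size_t i = 0; i < n; ++i)
        if (infos[i].pcStart <= pc && pc < infos[i].pcEnd)
            return infos[i].fde;
    return nullptr; // caller falls through to its other search strategies
}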
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/pal/prebuilt/idl/corprof_i.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 8.01.0622 */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif // !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback,0x176FBED1,0xA55C,0x4796,0x98,0xCA,0xA9,0xDA,0x0E,0xF8,0x83,0xE7); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback2,0x8A8CC829,0xCCF2,0x49fe,0xBB,0xAE,0x0F,0x02,0x22,0x28,0x07,0x1A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback3,0x4FD2ED52,0x7731,0x4b8d,0x94,0x69,0x03,0xD2,0xCC,0x30,0x86,0xC5); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback4,0x7B63B2E3,0x107D,0x4d48,0xB2,0xF6,0xF6,0x1E,0x22,0x94,0x70,0xD2); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback5,0x8DFBA405,0x8C9F,0x45F8,0xBF,0xFA,0x83,0xB1,0x4C,0xEF,0x78,0xB5); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback6,0xFC13DF4B,0x4448,0x4F4F,0x95,0x0C,0xBA,0x8D,0x19,0xD0,0x0C,0x36); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback7,0xF76A2DBA,0x1D52,0x4539,0x86,0x6C,0x2A,0xA5,0x18,0xF9,0xEF,0xC3); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback8,0x5BED9B15,0xC079,0x4D47,0xBF,0xE2,0x21,0x5A,0x14,0x0C,0x07,0xE0); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback9,0x27583EC3,0xC8F5,0x482F,0x80,0x52,0x19,0x4B,0x8C,0xE4,0x70,0x5A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback10,0xCEC5B60E,0xC69C,0x495F,0x87,0xF6,0x84,0xD2,0x8E,0xE1,0x6F,0xFB); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback11,0x42350846,0xAAED,0x47F7,0xB1,0x28,0xFD,0x0C,0x98,0x88,0x1C,0xDE); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo,0x28B5557D,0x3F3F,0x48b4,0x90,0xB2,0x5F,0x9E,0xEA,0x2F,0x6C,0x48); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo2,0xCC0935CD,0xA518,0x487d,0xB0,0xBB,0xA9,0x32,0x14,0xE6,0x54,0x78); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo3,0xB555ED4F,0x452A,0x4E54,0x8B,0x39,0xB5,0x36,0x0B,0xAD,0x32,0xA0); MIDL_DEFINE_GUID(IID, IID_ICorProfilerObjectEnum,0x2C6269BD,0x2D13,0x4321,0xAE,0x12,0x66,0x86,0x36,0x5F,0xD6,0xAF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerFunctionEnum,0xFF71301A,0xB994,0x429D,0xA1,0x0B,0xB3,0x45,0xA6,0x52,0x80,0xEF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerModuleEnum,0xb0266d75,0x2081,0x4493,0xaf,0x7f,0x02,0x8b,0xa3,0x4d,0xb8,0x91); MIDL_DEFINE_GUID(IID, IID_IMethodMalloc,0xA0EFB28B,0x6EE2,0x4d7b,0xB9,0x83,0xA7,0x5E,0xF7,0xBE,0xED,0xB8); MIDL_DEFINE_GUID(IID, IID_ICorProfilerFunctionControl,0xF0963021,0xE1EA,0x4732,0x85,0x81,0xE0,0x1B,0x0B,0xD3,0xC0,0xC6); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo4,0x0d8fdcaa,0x6257,0x47bf,0xb1,0xbf,0x94,0xda,0xc8,0x84,0x66,0xee); MIDL_DEFINE_GUID(IID, 
IID_ICorProfilerInfo5,0x07602928,0xCE38,0x4B83,0x81,0xE7,0x74,0xAD,0xAF,0x78,0x12,0x14); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo6,0xF30A070D,0xBFFB,0x46A7,0xB1,0xD8,0x87,0x81,0xEF,0x7B,0x69,0x8A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo7,0x9AEECC0D,0x63E0,0x4187,0x8C,0x00,0xE3,0x12,0xF5,0x03,0xF6,0x63); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo8,0xC5AC80A6,0x782E,0x4716,0x80,0x44,0x39,0x59,0x8C,0x60,0xCF,0xBF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo9,0x008170DB,0xF8CC,0x4796,0x9A,0x51,0xDC,0x8A,0xA0,0xB4,0x70,0x12); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo10,0x2F1B5152,0xC869,0x40C9,0xAA,0x5F,0x3A,0xBE,0x02,0x6B,0xD7,0x20); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo11,0x06398876,0x8987,0x4154,0xB6,0x21,0x40,0xA0,0x0D,0x6E,0x4D,0x04); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo12,0x27b24ccd,0x1cb1,0x47c5,0x96,0xee,0x98,0x19,0x0d,0xc3,0x09,0x59); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo13,0x19C4179D,0xF92C,0x4D25,0x9F,0x20,0x5F,0xEB,0xFB,0xBD,0x29,0x78); MIDL_DEFINE_GUID(IID, IID_ICorProfilerMethodEnum,0xFCCEE788,0x0088,0x454B,0xA8,0x11,0xC9,0x9F,0x29,0x8D,0x19,0x42); MIDL_DEFINE_GUID(IID, IID_ICorProfilerThreadEnum,0x571194f7,0x25ed,0x419f,0xaa,0x8b,0x70,0x16,0xb3,0x15,0x97,0x01); MIDL_DEFINE_GUID(IID, IID_ICorProfilerAssemblyReferenceProvider,0x66A78C24,0x2EEF,0x4F65,0xB4,0x5F,0xDD,0x1D,0x80,0x38,0xBF,0x3C); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 8.01.0622 */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif // !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback,0x176FBED1,0xA55C,0x4796,0x98,0xCA,0xA9,0xDA,0x0E,0xF8,0x83,0xE7); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback2,0x8A8CC829,0xCCF2,0x49fe,0xBB,0xAE,0x0F,0x02,0x22,0x28,0x07,0x1A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback3,0x4FD2ED52,0x7731,0x4b8d,0x94,0x69,0x03,0xD2,0xCC,0x30,0x86,0xC5); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback4,0x7B63B2E3,0x107D,0x4d48,0xB2,0xF6,0xF6,0x1E,0x22,0x94,0x70,0xD2); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback5,0x8DFBA405,0x8C9F,0x45F8,0xBF,0xFA,0x83,0xB1,0x4C,0xEF,0x78,0xB5); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback6,0xFC13DF4B,0x4448,0x4F4F,0x95,0x0C,0xBA,0x8D,0x19,0xD0,0x0C,0x36); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback7,0xF76A2DBA,0x1D52,0x4539,0x86,0x6C,0x2A,0xA5,0x18,0xF9,0xEF,0xC3); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback8,0x5BED9B15,0xC079,0x4D47,0xBF,0xE2,0x21,0x5A,0x14,0x0C,0x07,0xE0); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback9,0x27583EC3,0xC8F5,0x482F,0x80,0x52,0x19,0x4B,0x8C,0xE4,0x70,0x5A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback10,0xCEC5B60E,0xC69C,0x495F,0x87,0xF6,0x84,0xD2,0x8E,0xE1,0x6F,0xFB); MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback11,0x42350846,0xAAED,0x47F7,0xB1,0x28,0xFD,0x0C,0x98,0x88,0x1C,0xDE); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo,0x28B5557D,0x3F3F,0x48b4,0x90,0xB2,0x5F,0x9E,0xEA,0x2F,0x6C,0x48); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo2,0xCC0935CD,0xA518,0x487d,0xB0,0xBB,0xA9,0x32,0x14,0xE6,0x54,0x78); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo3,0xB555ED4F,0x452A,0x4E54,0x8B,0x39,0xB5,0x36,0x0B,0xAD,0x32,0xA0); MIDL_DEFINE_GUID(IID, IID_ICorProfilerObjectEnum,0x2C6269BD,0x2D13,0x4321,0xAE,0x12,0x66,0x86,0x36,0x5F,0xD6,0xAF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerFunctionEnum,0xFF71301A,0xB994,0x429D,0xA1,0x0B,0xB3,0x45,0xA6,0x52,0x80,0xEF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerModuleEnum,0xb0266d75,0x2081,0x4493,0xaf,0x7f,0x02,0x8b,0xa3,0x4d,0xb8,0x91); MIDL_DEFINE_GUID(IID, IID_IMethodMalloc,0xA0EFB28B,0x6EE2,0x4d7b,0xB9,0x83,0xA7,0x5E,0xF7,0xBE,0xED,0xB8); MIDL_DEFINE_GUID(IID, IID_ICorProfilerFunctionControl,0xF0963021,0xE1EA,0x4732,0x85,0x81,0xE0,0x1B,0x0B,0xD3,0xC0,0xC6); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo4,0x0d8fdcaa,0x6257,0x47bf,0xb1,0xbf,0x94,0xda,0xc8,0x84,0x66,0xee); MIDL_DEFINE_GUID(IID, 
IID_ICorProfilerInfo5,0x07602928,0xCE38,0x4B83,0x81,0xE7,0x74,0xAD,0xAF,0x78,0x12,0x14); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo6,0xF30A070D,0xBFFB,0x46A7,0xB1,0xD8,0x87,0x81,0xEF,0x7B,0x69,0x8A); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo7,0x9AEECC0D,0x63E0,0x4187,0x8C,0x00,0xE3,0x12,0xF5,0x03,0xF6,0x63); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo8,0xC5AC80A6,0x782E,0x4716,0x80,0x44,0x39,0x59,0x8C,0x60,0xCF,0xBF); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo9,0x008170DB,0xF8CC,0x4796,0x9A,0x51,0xDC,0x8A,0xA0,0xB4,0x70,0x12); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo10,0x2F1B5152,0xC869,0x40C9,0xAA,0x5F,0x3A,0xBE,0x02,0x6B,0xD7,0x20); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo11,0x06398876,0x8987,0x4154,0xB6,0x21,0x40,0xA0,0x0D,0x6E,0x4D,0x04); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo12,0x27b24ccd,0x1cb1,0x47c5,0x96,0xee,0x98,0x19,0x0d,0xc3,0x09,0x59); MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo13,0x19C4179D,0xF92C,0x4D25,0x9F,0x20,0x5F,0xEB,0xFB,0xBD,0x29,0x78); MIDL_DEFINE_GUID(IID, IID_ICorProfilerMethodEnum,0xFCCEE788,0x0088,0x454B,0xA8,0x11,0xC9,0x9F,0x29,0x8D,0x19,0x42); MIDL_DEFINE_GUID(IID, IID_ICorProfilerThreadEnum,0x571194f7,0x25ed,0x419f,0xaa,0x8b,0x70,0x16,0xb3,0x15,0x97,0x01); MIDL_DEFINE_GUID(IID, IID_ICorProfilerAssemblyReferenceProvider,0x66A78C24,0x2EEF,0x4F65,0xB4,0x5F,0xDD,0x1D,0x80,0x38,0xBF,0x3C); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
-1
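The PR description in this record (dotnet/runtime#66109) gates upper-vector save/restore on how the block ends: no-return calls were already exempt per dotnet/runtime#62662, and blocks ending in a throw now are too. A minimal sketch of that predicate with hypothetical types — only BBJ_THROW and the two exemptions are taken from the description; the real change lives in RyuJIT's register allocator:

enum BBjumpKind { BBJ_RETURN, BBJ_THROW, BBJ_ALWAYS /* ... */ };
struct BasicBlock { BBjumpKind bbJumpKind; };
struct CallSite   { bool isNoReturn; };

// Upper halves of large vectors are normally saved around a call and
// restored afterwards. If control never returns past the call -- the call
// itself does not return, or the block ends by throwing -- the restore is
// dead code, so the save/restore pair can be skipped.
static bool needsUpperVectorSaveRestore(const BasicBlock &block, const CallSite &call) {
    if (call.isNoReturn)
        return false; // exemption from dotnet/runtime#62662
    if (block.bbJumpKind == BBJ_THROW)
        return false; // exemption added by dotnet/runtime#66109
    return true;
}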
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/native/external/rapidjson/internal/biginteger.h
// Tencent is pleased to support the open source community by making RapidJSON available. // // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. // // Licensed under the MIT License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://opensource.org/licenses/MIT // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef RAPIDJSON_BIGINTEGER_H_ #define RAPIDJSON_BIGINTEGER_H_ #include "../rapidjson.h" #if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64) #include <intrin.h> // for _umul128 #pragma intrinsic(_umul128) #endif RAPIDJSON_NAMESPACE_BEGIN namespace internal { class BigInteger { public: typedef uint64_t Type; BigInteger(const BigInteger& rhs) : count_(rhs.count_) { std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); } explicit BigInteger(uint64_t u) : count_(1) { digits_[0] = u; } BigInteger(const char* decimals, size_t length) : count_(1) { RAPIDJSON_ASSERT(length > 0); digits_[0] = 0; size_t i = 0; const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 while (length >= kMaxDigitPerIteration) { AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); length -= kMaxDigitPerIteration; i += kMaxDigitPerIteration; } if (length > 0) AppendDecimal64(decimals + i, decimals + i + length); } BigInteger& operator=(const BigInteger &rhs) { if (this != &rhs) { count_ = rhs.count_; std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); } return *this; } BigInteger& operator=(uint64_t u) { digits_[0] = u; count_ = 1; return *this; } BigInteger& operator+=(uint64_t u) { Type backup = digits_[0]; digits_[0] += u; for (size_t i = 0; i < count_ - 1; i++) { if (digits_[i] >= backup) return *this; // no carry backup = digits_[i + 1]; digits_[i + 1] += 1; } // Last carry if (digits_[count_ - 1] < backup) PushBack(1); return *this; } BigInteger& operator*=(uint64_t u) { if (u == 0) return *this = 0; if (u == 1) return *this; if (*this == 1) return *this = u; uint64_t k = 0; for (size_t i = 0; i < count_; i++) { uint64_t hi; digits_[i] = MulAdd64(digits_[i], u, k, &hi); k = hi; } if (k > 0) PushBack(k); return *this; } BigInteger& operator*=(uint32_t u) { if (u == 0) return *this = 0; if (u == 1) return *this; if (*this == 1) return *this = u; uint64_t k = 0; for (size_t i = 0; i < count_; i++) { const uint64_t c = digits_[i] >> 32; const uint64_t d = digits_[i] & 0xFFFFFFFF; const uint64_t uc = u * c; const uint64_t ud = u * d; const uint64_t p0 = ud + k; const uint64_t p1 = uc + (p0 >> 32); digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); k = p1 >> 32; } if (k > 0) PushBack(k); return *this; } BigInteger& operator<<=(size_t shift) { if (IsZero() || shift == 0) return *this; size_t offset = shift / kTypeBit; size_t interShift = shift % kTypeBit; RAPIDJSON_ASSERT(count_ + offset <= kCapacity); if (interShift == 0) { std::memmove(digits_ + offset, digits_, count_ * sizeof(Type)); count_ += offset; } else { digits_[count_] = 0; for (size_t i = count_; i > 0; i--) digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); digits_[offset] = digits_[0] << interShift; count_ += offset; if (digits_[count_]) count_++; } 
std::memset(digits_, 0, offset * sizeof(Type)); return *this; } bool operator==(const BigInteger& rhs) const { return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; } bool operator==(const Type rhs) const { return count_ == 1 && digits_[0] == rhs; } BigInteger& MultiplyPow5(unsigned exp) { static const uint32_t kPow5[12] = { 5, 5 * 5, 5 * 5 * 5, 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 }; if (exp == 0) return *this; for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13 if (exp > 0) *this *= kPow5[exp - 1]; return *this; } // Compute absolute difference of this and rhs. // Assume this != rhs bool Difference(const BigInteger& rhs, BigInteger* out) const { int cmp = Compare(rhs); RAPIDJSON_ASSERT(cmp != 0); const BigInteger *a, *b; // Makes a > b bool ret; if (cmp < 0) { a = &rhs; b = this; ret = true; } else { a = this; b = &rhs; ret = false; } Type borrow = 0; for (size_t i = 0; i < a->count_; i++) { Type d = a->digits_[i] - borrow; if (i < b->count_) d -= b->digits_[i]; borrow = (d > a->digits_[i]) ? 1 : 0; out->digits_[i] = d; if (d != 0) out->count_ = i + 1; } return ret; } int Compare(const BigInteger& rhs) const { if (count_ != rhs.count_) return count_ < rhs.count_ ? -1 : 1; for (size_t i = count_; i-- > 0;) if (digits_[i] != rhs.digits_[i]) return digits_[i] < rhs.digits_[i] ? -1 : 1; return 0; } size_t GetCount() const { return count_; } Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } bool IsZero() const { return count_ == 1 && digits_[0] == 0; } private: void AppendDecimal64(const char* begin, const char* end) { uint64_t u = ParseUint64(begin, end); if (IsZero()) *this = u; else { unsigned exp = static_cast<unsigned>(end - begin); (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u } } void PushBack(Type digit) { RAPIDJSON_ASSERT(count_ < kCapacity); digits_[count_++] = digit; } static uint64_t ParseUint64(const char* begin, const char* end) { uint64_t r = 0; for (const char* p = begin; p != end; ++p) { RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); r = r * 10u + static_cast<unsigned>(*p - '0'); } return r; } // Assume a * b + k < 2^128 static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { #if defined(_MSC_VER) && defined(_M_AMD64) uint64_t low = _umul128(a, b, outHigh) + k; if (low < k) (*outHigh)++; return low; #elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) __extension__ typedef unsigned __int128 uint128; uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b); p += k; *outHigh = static_cast<uint64_t>(p >> 64); return static_cast<uint64_t>(p); #else const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; x1 += (x0 >> 32); // can't give carry x1 += x2; if (x1 < x2) x3 += (static_cast<uint64_t>(1) << 32); uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); uint64_t hi = x3 + (x1 >> 32); lo += k; if (lo < k) hi++; *outHigh = hi; return lo; #endif } static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 static const size_t kCapacity = kBitCount / sizeof(Type); static const 
size_t kTypeBit = sizeof(Type) * 8; Type digits_[kCapacity]; size_t count_; }; } // namespace internal RAPIDJSON_NAMESPACE_END #endif // RAPIDJSON_BIGINTEGER_H_
// Tencent is pleased to support the open source community by making RapidJSON available. // // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. // // Licensed under the MIT License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://opensource.org/licenses/MIT // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef RAPIDJSON_BIGINTEGER_H_ #define RAPIDJSON_BIGINTEGER_H_ #include "../rapidjson.h" #if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64) #include <intrin.h> // for _umul128 #pragma intrinsic(_umul128) #endif RAPIDJSON_NAMESPACE_BEGIN namespace internal { class BigInteger { public: typedef uint64_t Type; BigInteger(const BigInteger& rhs) : count_(rhs.count_) { std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); } explicit BigInteger(uint64_t u) : count_(1) { digits_[0] = u; } BigInteger(const char* decimals, size_t length) : count_(1) { RAPIDJSON_ASSERT(length > 0); digits_[0] = 0; size_t i = 0; const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 while (length >= kMaxDigitPerIteration) { AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); length -= kMaxDigitPerIteration; i += kMaxDigitPerIteration; } if (length > 0) AppendDecimal64(decimals + i, decimals + i + length); } BigInteger& operator=(const BigInteger &rhs) { if (this != &rhs) { count_ = rhs.count_; std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); } return *this; } BigInteger& operator=(uint64_t u) { digits_[0] = u; count_ = 1; return *this; } BigInteger& operator+=(uint64_t u) { Type backup = digits_[0]; digits_[0] += u; for (size_t i = 0; i < count_ - 1; i++) { if (digits_[i] >= backup) return *this; // no carry backup = digits_[i + 1]; digits_[i + 1] += 1; } // Last carry if (digits_[count_ - 1] < backup) PushBack(1); return *this; } BigInteger& operator*=(uint64_t u) { if (u == 0) return *this = 0; if (u == 1) return *this; if (*this == 1) return *this = u; uint64_t k = 0; for (size_t i = 0; i < count_; i++) { uint64_t hi; digits_[i] = MulAdd64(digits_[i], u, k, &hi); k = hi; } if (k > 0) PushBack(k); return *this; } BigInteger& operator*=(uint32_t u) { if (u == 0) return *this = 0; if (u == 1) return *this; if (*this == 1) return *this = u; uint64_t k = 0; for (size_t i = 0; i < count_; i++) { const uint64_t c = digits_[i] >> 32; const uint64_t d = digits_[i] & 0xFFFFFFFF; const uint64_t uc = u * c; const uint64_t ud = u * d; const uint64_t p0 = ud + k; const uint64_t p1 = uc + (p0 >> 32); digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); k = p1 >> 32; } if (k > 0) PushBack(k); return *this; } BigInteger& operator<<=(size_t shift) { if (IsZero() || shift == 0) return *this; size_t offset = shift / kTypeBit; size_t interShift = shift % kTypeBit; RAPIDJSON_ASSERT(count_ + offset <= kCapacity); if (interShift == 0) { std::memmove(digits_ + offset, digits_, count_ * sizeof(Type)); count_ += offset; } else { digits_[count_] = 0; for (size_t i = count_; i > 0; i--) digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); digits_[offset] = digits_[0] << interShift; count_ += offset; if (digits_[count_]) count_++; } 
std::memset(digits_, 0, offset * sizeof(Type)); return *this; } bool operator==(const BigInteger& rhs) const { return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; } bool operator==(const Type rhs) const { return count_ == 1 && digits_[0] == rhs; } BigInteger& MultiplyPow5(unsigned exp) { static const uint32_t kPow5[12] = { 5, 5 * 5, 5 * 5 * 5, 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 }; if (exp == 0) return *this; for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13 if (exp > 0) *this *= kPow5[exp - 1]; return *this; } // Compute absolute difference of this and rhs. // Assume this != rhs bool Difference(const BigInteger& rhs, BigInteger* out) const { int cmp = Compare(rhs); RAPIDJSON_ASSERT(cmp != 0); const BigInteger *a, *b; // Makes a > b bool ret; if (cmp < 0) { a = &rhs; b = this; ret = true; } else { a = this; b = &rhs; ret = false; } Type borrow = 0; for (size_t i = 0; i < a->count_; i++) { Type d = a->digits_[i] - borrow; if (i < b->count_) d -= b->digits_[i]; borrow = (d > a->digits_[i]) ? 1 : 0; out->digits_[i] = d; if (d != 0) out->count_ = i + 1; } return ret; } int Compare(const BigInteger& rhs) const { if (count_ != rhs.count_) return count_ < rhs.count_ ? -1 : 1; for (size_t i = count_; i-- > 0;) if (digits_[i] != rhs.digits_[i]) return digits_[i] < rhs.digits_[i] ? -1 : 1; return 0; } size_t GetCount() const { return count_; } Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } bool IsZero() const { return count_ == 1 && digits_[0] == 0; } private: void AppendDecimal64(const char* begin, const char* end) { uint64_t u = ParseUint64(begin, end); if (IsZero()) *this = u; else { unsigned exp = static_cast<unsigned>(end - begin); (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u } } void PushBack(Type digit) { RAPIDJSON_ASSERT(count_ < kCapacity); digits_[count_++] = digit; } static uint64_t ParseUint64(const char* begin, const char* end) { uint64_t r = 0; for (const char* p = begin; p != end; ++p) { RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); r = r * 10u + static_cast<unsigned>(*p - '0'); } return r; } // Assume a * b + k < 2^128 static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { #if defined(_MSC_VER) && defined(_M_AMD64) uint64_t low = _umul128(a, b, outHigh) + k; if (low < k) (*outHigh)++; return low; #elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) __extension__ typedef unsigned __int128 uint128; uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b); p += k; *outHigh = static_cast<uint64_t>(p >> 64); return static_cast<uint64_t>(p); #else const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; x1 += (x0 >> 32); // can't give carry x1 += x2; if (x1 < x2) x3 += (static_cast<uint64_t>(1) << 32); uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); uint64_t hi = x3 + (x1 >> 32); lo += k; if (lo < k) hi++; *outHigh = hi; return lo; #endif } static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 static const size_t kCapacity = kBitCount / sizeof(Type); static const 
size_t kTypeBit = sizeof(Type) * 8; Type digits_[kCapacity]; size_t count_; }; } // namespace internal RAPIDJSON_NAMESPACE_END #endif // RAPIDJSON_BIGINTEGER_H_
-1
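MulAdd64 in the biginteger.h record above has a portable fallback that splits each 64-bit operand into 32-bit halves. The sketch below reproduces that decomposition outside RapidJSON and checks it against the compiler's unsigned __int128 (available on gcc/clang 64-bit targets):

#include <cassert>
#include <cstdint>

// Computes a*b + k, returning the low 64 bits and writing the high 64 bits,
// using only 64-bit arithmetic -- the same half-word scheme as the header's
// fallback path.
static uint64_t mulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t *hi) {
    const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32;
    const uint64_t b0 = b & 0xFFFFFFFF, b1 = b >> 32;
    uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
    x1 += x0 >> 32;                    // cannot overflow
    x1 += x2;
    if (x1 < x2) x3 += 1ull << 32;     // carry out of the middle column
    uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
    uint64_t h  = x3 + (x1 >> 32);
    lo += k;
    if (lo < k) h++;                   // carry from adding k
    *hi = h;
    return lo;
}

int main() {
    const uint64_t a = 0xDEADBEEFCAFEBABEull, b = 0x0123456789ABCDEFull, k = 42;
    unsigned __int128 p = (unsigned __int128)a * b + k;
    uint64_t hi, lo = mulAdd64(a, b, k, &hi);
    assert(lo == (uint64_t)p && hi == (uint64_t)(p >> 64));
    return 0;
}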
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/GC/Scenarios/GCSimulator/GCSimulator_115.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <GCStressIncompatible>true</GCStressIncompatible> <CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 3 -f -dp 0.0 -dw 0.0</CLRTestExecutionArguments> <IsGCSimulatorTest>true</IsGCSimulatorTest> <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="GCSimulator.cs" /> <Compile Include="lifetimefx.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <GCStressIncompatible>true</GCStressIncompatible> <CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 3 -f -dp 0.0 -dw 0.0</CLRTestExecutionArguments> <IsGCSimulatorTest>true</IsGCSimulatorTest> <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="GCSimulator.cs" /> <Compile Include="lifetimefx.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/libraries/System.Formats.Cbor/ref/System.Formats.Cbor.netcoreapp.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Formats.Cbor { public partial class CborReader { public System.Half ReadHalf() { throw null; } } public partial class CborWriter { public void WriteHalf(System.Half value) { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Formats.Cbor { public partial class CborReader { public System.Half ReadHalf() { throw null; } } public partial class CborWriter { public void WriteHalf(System.Half value) { } } }
-1
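ReadHalf/WriteHalf in this record exchange System.Half values; on the wire, CBOR (RFC 8949) carries a half-precision float as major type 7 with additional information 25, i.e. the initial byte 0xF9 followed by the 16-bit IEEE 754 binary16 pattern in big-endian order. A tiny sketch of just that framing, with the bit pattern for 1.5 hard-coded to sidestep float-to-half conversion:

#include <cassert>
#include <cstdint>
#include <vector>

// Frames a raw binary16 bit pattern as a CBOR data item:
// 0xF9 (major type 7, additional info 25), then two bytes big-endian.
static std::vector<uint8_t> cborEncodeHalfBits(uint16_t bits) {
    return {0xF9, uint8_t(bits >> 8), uint8_t(bits & 0xFF)};
}

int main() {
    // 1.5 in binary16: sign 0, exponent 01111 (bias 15), mantissa 1000000000.
    auto bytes = cborEncodeHalfBits(0x3E00);
    assert(bytes[0] == 0xF9 && bytes[1] == 0x3E && bytes[2] == 0x00);
    return 0;
}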
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/libraries/System.Private.DataContractSerialization/src/System/Runtime/Serialization/Json/DateTimeFormat.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; namespace System.Runtime.Serialization { /// <summary> /// This class is used to customize the way DateTime is /// serialized or deserialized by <see cref="Json.DataContractJsonSerializer"/> /// </summary> public class DateTimeFormat { private readonly string _formatString; private readonly IFormatProvider _formatProvider; private DateTimeStyles _dateTimeStyles; /// <summary> /// Initializes a new <see cref="DateTimeFormat"/> with the specified /// formatString and DateTimeFormatInfo.CurrentInfo as the /// formatProvider. /// </summary> /// <param name="formatString">Specifies the formatString to be used.</param> public DateTimeFormat(string formatString) : this(formatString, DateTimeFormatInfo.CurrentInfo) { } /// <summary> /// Initializes a new <see cref="DateTimeFormat"/> with the specified /// formatString and formatProvider. /// </summary> /// <param name="formatString">Specifies the formatString to be used.</param> /// <param name="formatProvider">Specifies the formatProvider to be used.</param> public DateTimeFormat(string formatString!!, IFormatProvider formatProvider!!) { _formatString = formatString; _formatProvider = formatProvider; _dateTimeStyles = DateTimeStyles.RoundtripKind; } /// <summary> /// Gets the FormatString set on this instance. /// </summary> public string FormatString { get { return _formatString; } } /// <summary> /// Gets the FormatProvider set on this instance. /// </summary> public IFormatProvider FormatProvider { get { return _formatProvider; } } /// <summary> /// Gets or sets the <see cref="DateTimeStyles"/> on this instance. /// </summary> public DateTimeStyles DateTimeStyles { get { return _dateTimeStyles; } set { _dateTimeStyles = value; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; namespace System.Runtime.Serialization { /// <summary> /// This class is used to customize the way DateTime is /// serialized or deserialized by <see cref="Json.DataContractJsonSerializer"/> /// </summary> public class DateTimeFormat { private readonly string _formatString; private readonly IFormatProvider _formatProvider; private DateTimeStyles _dateTimeStyles; /// <summary> /// Initializes a new <see cref="DateTimeFormat"/> with the specified /// formatString and DateTimeFormatInfo.CurrentInfo as the /// formatProvider. /// </summary> /// <param name="formatString">Specifies the formatString to be used.</param> public DateTimeFormat(string formatString) : this(formatString, DateTimeFormatInfo.CurrentInfo) { } /// <summary> /// Initializes a new <see cref="DateTimeFormat"/> with the specified /// formatString and formatProvider. /// </summary> /// <param name="formatString">Specifies the formatString to be used.</param> /// <param name="formatProvider">Specifies the formatProvider to be used.</param> public DateTimeFormat(string formatString!!, IFormatProvider formatProvider!!) { _formatString = formatString; _formatProvider = formatProvider; _dateTimeStyles = DateTimeStyles.RoundtripKind; } /// <summary> /// Gets the FormatString set on this instance. /// </summary> public string FormatString { get { return _formatString; } } /// <summary> /// Gets the FormatProvider set on this instance. /// </summary> public IFormatProvider FormatProvider { get { return _formatProvider; } } /// <summary> /// Gets or sets the <see cref="DateTimeStyles"/> on this instance. /// </summary> public DateTimeStyles DateTimeStyles { get { return _dateTimeStyles; } set { _dateTimeStyles = value; } } } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/JIT/HardwareIntrinsics/General/NotSupported/Vector64BooleanToVector128Unsafe.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void Vector64BooleanToVector128Unsafe() { bool succeeded = false; try { Vector128<bool> result = default(Vector64<bool>).ToVector128Unsafe(); } catch (NotSupportedException) { succeeded = true; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector64BooleanToVector128Unsafe: RunNotSupportedScenario failed to throw NotSupportedException."); TestLibrary.TestFramework.LogInformation(string.Empty); throw new Exception("One or more scenarios did not complete as expected."); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void Vector64BooleanToVector128Unsafe() { bool succeeded = false; try { Vector128<bool> result = default(Vector64<bool>).ToVector128Unsafe(); } catch (NotSupportedException) { succeeded = true; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector64BooleanToVector128Unsafe: RunNotSupportedScenario failed to throw NotSupportedException."); TestLibrary.TestFramework.LogInformation(string.Empty); throw new Exception("One or more scenarios did not complete as expected."); } } } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/native/libs/System.Security.Cryptography.Native/pal_ocsp.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_crypto_types.h" #include "pal_compiler.h" #include "opensslshim.h" /* Direct shim to OCSP_REQUEST_free */ PALEXPORT void CryptoNative_OcspRequestDestroy(OCSP_REQUEST* request); /* Returns the number of bytes required to encode an OCSP_REQUEST */ PALEXPORT int32_t CryptoNative_GetOcspRequestDerSize(OCSP_REQUEST* req); /* Encodes the OCSP_REQUEST req into the destination buffer, returning the number of bytes written. */ PALEXPORT int32_t CryptoNative_EncodeOcspRequest(OCSP_REQUEST* req, uint8_t* buf); /* Direct shim to d2i_OCSP_RESPONSE */ PALEXPORT OCSP_RESPONSE* CryptoNative_DecodeOcspResponse(const uint8_t* buf, int32_t len); /* Direct shim to OCSP_RESPONSE_free */ PALEXPORT void CryptoNative_OcspResponseDestroy(OCSP_RESPONSE* response);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_crypto_types.h" #include "pal_compiler.h" #include "opensslshim.h" /* Direct shim to OCSP_REQUEST_free */ PALEXPORT void CryptoNative_OcspRequestDestroy(OCSP_REQUEST* request); /* Returns the number of bytes required to encode an OCSP_REQUEST */ PALEXPORT int32_t CryptoNative_GetOcspRequestDerSize(OCSP_REQUEST* req); /* Encodes the OCSP_REQUEST req into the destination buffer, returning the number of bytes written. */ PALEXPORT int32_t CryptoNative_EncodeOcspRequest(OCSP_REQUEST* req, uint8_t* buf); /* Direct shim to d2i_OCSP_RESPONSE */ PALEXPORT OCSP_RESPONSE* CryptoNative_DecodeOcspResponse(const uint8_t* buf, int32_t len); /* Direct shim to OCSP_RESPONSE_free */ PALEXPORT void CryptoNative_OcspResponseDestroy(OCSP_RESPONSE* response);
-1
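The GetOcspRequestDerSize/EncodeOcspRequest pair in this record mirrors OpenSSL's usual two-call i2d idiom: query the DER length with a null buffer, then encode into caller-provided storage. A hedged sketch of that pattern calling i2d_OCSP_REQUEST directly (error handling kept minimal):

#include <openssl/ocsp.h>
#include <vector>

// Two-call i2d pattern: the first call sizes the encoding, the second
// writes it; i2d advances the output pointer as it writes.
static std::vector<unsigned char> encodeOcspRequest(OCSP_REQUEST *req) {
    int len = i2d_OCSP_REQUEST(req, nullptr);
    if (len <= 0)
        return {};
    std::vector<unsigned char> der(static_cast<size_t>(len));
    unsigned char *p = der.data();
    i2d_OCSP_REQUEST(req, &p);
    return der;
}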
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/libraries/System.ServiceModel.Syndication/ref/System.ServiceModel.Syndication.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ------------------------------------------------------------------------------
// Changes to this file must follow the https://aka.ms/api-review process.
// ------------------------------------------------------------------------------

namespace System.ServiceModel.Syndication
{
    [System.Xml.Serialization.XmlRootAttribute(ElementName="feed", Namespace="http://www.w3.org/2005/Atom")]
    public partial class Atom10FeedFormatter : System.ServiceModel.Syndication.SyndicationFeedFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public Atom10FeedFormatter() { }
        public Atom10FeedFormatter(System.ServiceModel.Syndication.SyndicationFeed feedToWrite) { }
        public Atom10FeedFormatter(System.Type feedTypeToCreate) { }
        protected System.Type FeedType { get { throw null; } }
        public bool PreserveAttributeExtensions { get { throw null; } set { } }
        public bool PreserveElementExtensions { get { throw null; } set { } }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        protected virtual System.ServiceModel.Syndication.SyndicationItem ReadItem(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected virtual System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> ReadItems(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, out bool areAllItemsRead) { throw null; }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        protected virtual void WriteItem(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, System.Uri feedBaseUri) { }
        protected virtual void WriteItems(System.Xml.XmlWriter writer, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items, System.Uri feedBaseUri) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="feed", Namespace="http://www.w3.org/2005/Atom")]
    public partial class Atom10FeedFormatter<TSyndicationFeed> : System.ServiceModel.Syndication.Atom10FeedFormatter where TSyndicationFeed : System.ServiceModel.Syndication.SyndicationFeed, new()
    {
        public Atom10FeedFormatter() { }
        public Atom10FeedFormatter(TSyndicationFeed feedToWrite) { }
        protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="entry", Namespace="http://www.w3.org/2005/Atom")]
    public partial class Atom10ItemFormatter : System.ServiceModel.Syndication.SyndicationItemFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public Atom10ItemFormatter() { }
        public Atom10ItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite) { }
        public Atom10ItemFormatter(System.Type itemTypeToCreate) { }
        protected System.Type ItemType { get { throw null; } }
        public bool PreserveAttributeExtensions { get { throw null; } set { } }
        public bool PreserveElementExtensions { get { throw null; } set { } }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="entry", Namespace="http://www.w3.org/2005/Atom")]
    public partial class Atom10ItemFormatter<TSyndicationItem> : System.ServiceModel.Syndication.Atom10ItemFormatter where TSyndicationItem : System.ServiceModel.Syndication.SyndicationItem, new()
    {
        public Atom10ItemFormatter() { }
        public Atom10ItemFormatter(TSyndicationItem itemToWrite) { }
        protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="categories", Namespace="http://www.w3.org/2007/app")]
    public partial class AtomPub10CategoriesDocumentFormatter : System.ServiceModel.Syndication.CategoriesDocumentFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public AtomPub10CategoriesDocumentFormatter() { }
        public AtomPub10CategoriesDocumentFormatter(System.ServiceModel.Syndication.CategoriesDocument documentToWrite) { }
        public AtomPub10CategoriesDocumentFormatter(System.Type inlineDocumentType, System.Type referencedDocumentType) { }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.InlineCategoriesDocument CreateInlineCategoriesDocument() { throw null; }
        protected override System.ServiceModel.Syndication.ReferencedCategoriesDocument CreateReferencedCategoriesDocument() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="service", Namespace="http://www.w3.org/2007/app")]
    public partial class AtomPub10ServiceDocumentFormatter : System.ServiceModel.Syndication.ServiceDocumentFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public AtomPub10ServiceDocumentFormatter() { }
        public AtomPub10ServiceDocumentFormatter(System.ServiceModel.Syndication.ServiceDocument documentToWrite) { }
        public AtomPub10ServiceDocumentFormatter(System.Type documentTypeToCreate) { }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.ServiceDocument CreateDocumentInstance() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="service", Namespace="http://www.w3.org/2007/app")]
    public partial class AtomPub10ServiceDocumentFormatter<TServiceDocument> : System.ServiceModel.Syndication.AtomPub10ServiceDocumentFormatter where TServiceDocument : System.ServiceModel.Syndication.ServiceDocument, new()
    {
        public AtomPub10ServiceDocumentFormatter() { }
        public AtomPub10ServiceDocumentFormatter(TServiceDocument documentToWrite) { }
        protected override System.ServiceModel.Syndication.ServiceDocument CreateDocumentInstance() { throw null; }
    }
    public abstract partial class CategoriesDocument
    {
        internal CategoriesDocument() { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Language { get { throw null; } set { } }
        public static System.ServiceModel.Syndication.InlineCategoriesDocument Create(System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> categories) { throw null; }
        public static System.ServiceModel.Syndication.InlineCategoriesDocument Create(System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> categories, bool isFixed, string scheme) { throw null; }
        public static System.ServiceModel.Syndication.ReferencedCategoriesDocument Create(System.Uri linkToCategoriesDocument) { throw null; }
        public System.ServiceModel.Syndication.CategoriesDocumentFormatter GetFormatter() { throw null; }
        public static System.ServiceModel.Syndication.CategoriesDocument Load(System.Xml.XmlReader reader) { throw null; }
        public void Save(System.Xml.XmlWriter writer) { }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    [System.Runtime.Serialization.DataContractAttribute]
    public abstract partial class CategoriesDocumentFormatter
    {
        protected CategoriesDocumentFormatter() { }
        protected CategoriesDocumentFormatter(System.ServiceModel.Syndication.CategoriesDocument documentToWrite) { }
        public System.ServiceModel.Syndication.CategoriesDocument Document { get { throw null; } }
        public abstract string Version { get; }
        public abstract bool CanRead(System.Xml.XmlReader reader);
        protected virtual System.ServiceModel.Syndication.InlineCategoriesDocument CreateInlineCategoriesDocument() { throw null; }
        protected virtual System.ServiceModel.Syndication.ReferencedCategoriesDocument CreateReferencedCategoriesDocument() { throw null; }
        public abstract void ReadFrom(System.Xml.XmlReader reader);
        protected virtual void SetDocument(System.ServiceModel.Syndication.CategoriesDocument document) { }
        public abstract void WriteTo(System.Xml.XmlWriter writer);
    }
    public partial class InlineCategoriesDocument : System.ServiceModel.Syndication.CategoriesDocument
    {
        public InlineCategoriesDocument() { }
        public InlineCategoriesDocument(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationCategory> categories) { }
        public InlineCategoriesDocument(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationCategory> categories, bool isFixed, string scheme) { }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> Categories { get { throw null; } }
        public bool IsFixed { get { throw null; } set { } }
        public string Scheme { get { throw null; } set { } }
        protected internal virtual System.ServiceModel.Syndication.SyndicationCategory CreateCategory() { throw null; }
    }
    public partial class ReferencedCategoriesDocument : System.ServiceModel.Syndication.CategoriesDocument
    {
        public ReferencedCategoriesDocument() { }
        public ReferencedCategoriesDocument(System.Uri link) { }
        public System.Uri Link { get { throw null; } set { } }
    }
    public partial class ResourceCollectionInfo
    {
        public ResourceCollectionInfo() { }
        public ResourceCollectionInfo(System.ServiceModel.Syndication.TextSyndicationContent title, System.Uri link) { }
        public ResourceCollectionInfo(System.ServiceModel.Syndication.TextSyndicationContent title, System.Uri link, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.CategoriesDocument> categories, bool allowsNewEntries) { }
        public ResourceCollectionInfo(System.ServiceModel.Syndication.TextSyndicationContent title, System.Uri link, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.CategoriesDocument> categories, System.Collections.Generic.IEnumerable<string> accepts) { }
        public ResourceCollectionInfo(string title, System.Uri link) { }
        public System.Collections.ObjectModel.Collection<string> Accepts { get { throw null; } }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.CategoriesDocument> Categories { get { throw null; } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public System.Uri Link { get { throw null; } set { } }
        public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } }
        protected internal virtual System.ServiceModel.Syndication.InlineCategoriesDocument CreateInlineCategoriesDocument() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.ReferencedCategoriesDocument CreateReferencedCategoriesDocument() { throw null; }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="rss", Namespace="")]
    public partial class Rss20FeedFormatter : System.ServiceModel.Syndication.SyndicationFeedFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public Rss20FeedFormatter() { }
        public Rss20FeedFormatter(System.ServiceModel.Syndication.SyndicationFeed feedToWrite) { }
        public Rss20FeedFormatter(System.ServiceModel.Syndication.SyndicationFeed feedToWrite, bool serializeExtensionsAsAtom) { }
        public Rss20FeedFormatter(System.Type feedTypeToCreate) { }
        protected System.Type FeedType { get { throw null; } }
        public bool PreserveAttributeExtensions { get { throw null; } set { } }
        public bool PreserveElementExtensions { get { throw null; } set { } }
        public bool SerializeExtensionsAsAtom { get { throw null; } set { } }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        protected virtual System.ServiceModel.Syndication.SyndicationItem ReadItem(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected virtual System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> ReadItems(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, out bool areAllItemsRead) { throw null; }
        protected internal override void SetFeed(System.ServiceModel.Syndication.SyndicationFeed feed) { }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        protected virtual void WriteItem(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, System.Uri feedBaseUri) { }
        protected virtual void WriteItems(System.Xml.XmlWriter writer, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items, System.Uri feedBaseUri) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="rss", Namespace="")]
    public partial class Rss20FeedFormatter<TSyndicationFeed> : System.ServiceModel.Syndication.Rss20FeedFormatter where TSyndicationFeed : System.ServiceModel.Syndication.SyndicationFeed, new()
    {
        public Rss20FeedFormatter() { }
        public Rss20FeedFormatter(TSyndicationFeed feedToWrite) { }
        public Rss20FeedFormatter(TSyndicationFeed feedToWrite, bool serializeExtensionsAsAtom) { }
        protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="item", Namespace="")]
    public partial class Rss20ItemFormatter : System.ServiceModel.Syndication.SyndicationItemFormatter, System.Xml.Serialization.IXmlSerializable
    {
        public Rss20ItemFormatter() { }
        public Rss20ItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite) { }
        public Rss20ItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite, bool serializeExtensionsAsAtom) { }
        public Rss20ItemFormatter(System.Type itemTypeToCreate) { }
        protected System.Type ItemType { get { throw null; } }
        public bool PreserveAttributeExtensions { get { throw null; } set { } }
        public bool PreserveElementExtensions { get { throw null; } set { } }
        public bool SerializeExtensionsAsAtom { get { throw null; } set { } }
        public override string Version { get { throw null; } }
        public override bool CanRead(System.Xml.XmlReader reader) { throw null; }
        protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; }
        public override void ReadFrom(System.Xml.XmlReader reader) { }
        System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; }
        void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { }
        void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { }
        public override void WriteTo(System.Xml.XmlWriter writer) { }
    }
    [System.Xml.Serialization.XmlRootAttribute(ElementName="item", Namespace="")]
    public partial class Rss20ItemFormatter<TSyndicationItem> : System.ServiceModel.Syndication.Rss20ItemFormatter, System.Xml.Serialization.IXmlSerializable where TSyndicationItem : System.ServiceModel.Syndication.SyndicationItem, new()
    {
        public Rss20ItemFormatter() { }
        public Rss20ItemFormatter(TSyndicationItem itemToWrite) { }
        public Rss20ItemFormatter(TSyndicationItem itemToWrite, bool serializeExtensionsAsAtom) { }
        protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; }
    }
    public partial class ServiceDocument
    {
        public ServiceDocument() { }
        public ServiceDocument(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.Workspace> workspaces) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Language { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.Workspace> Workspaces { get { throw null; } }
        protected internal virtual System.ServiceModel.Syndication.Workspace CreateWorkspace() { throw null; }
        public System.ServiceModel.Syndication.ServiceDocumentFormatter GetFormatter() { throw null; }
        public static System.ServiceModel.Syndication.ServiceDocument Load(System.Xml.XmlReader reader) { throw null; }
        public static TServiceDocument Load<TServiceDocument>(System.Xml.XmlReader reader) where TServiceDocument : System.ServiceModel.Syndication.ServiceDocument, new() { throw null; }
        public void Save(System.Xml.XmlWriter writer) { }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    [System.Runtime.Serialization.DataContractAttribute]
    public abstract partial class ServiceDocumentFormatter
    {
        protected ServiceDocumentFormatter() { }
        protected ServiceDocumentFormatter(System.ServiceModel.Syndication.ServiceDocument documentToWrite) { }
        public System.ServiceModel.Syndication.ServiceDocument Document { get { throw null; } }
        public abstract string Version { get; }
        public abstract bool CanRead(System.Xml.XmlReader reader);
        protected static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.InlineCategoriesDocument inlineCategories) { throw null; }
        protected static System.ServiceModel.Syndication.ResourceCollectionInfo CreateCollection(System.ServiceModel.Syndication.Workspace workspace) { throw null; }
        protected virtual System.ServiceModel.Syndication.ServiceDocument CreateDocumentInstance() { throw null; }
        protected static System.ServiceModel.Syndication.InlineCategoriesDocument CreateInlineCategories(System.ServiceModel.Syndication.ResourceCollectionInfo collection) { throw null; }
        protected static System.ServiceModel.Syndication.ReferencedCategoriesDocument CreateReferencedCategories(System.ServiceModel.Syndication.ResourceCollectionInfo collection) { throw null; }
        protected static System.ServiceModel.Syndication.Workspace CreateWorkspace(System.ServiceModel.Syndication.ServiceDocument document) { throw null; }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.CategoriesDocument categories, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ResourceCollectionInfo collection, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ServiceDocument document, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.Workspace workspace, int maxExtensionSize) { }
        public abstract void ReadFrom(System.Xml.XmlReader reader);
        protected virtual void SetDocument(System.ServiceModel.Syndication.ServiceDocument document) { }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.ServiceDocument document, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.Workspace workspace, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ServiceDocument document, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.Workspace workspace, string version) { throw null; }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ServiceDocument document, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.Workspace workspace, string version) { }
        protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { }
        protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { }
        protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ServiceDocument document, string version) { }
        protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.Workspace workspace, string version) { }
        public abstract void WriteTo(System.Xml.XmlWriter writer);
    }
    public partial class SyndicationCategory
    {
        public SyndicationCategory() { }
        protected SyndicationCategory(System.ServiceModel.Syndication.SyndicationCategory source) { }
        public SyndicationCategory(string name) { }
        public SyndicationCategory(string name, string scheme, string label) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Label { get { throw null; } set { } }
        public string Name { get { throw null; } set { } }
        public string Scheme { get { throw null; } set { } }
        public virtual System.ServiceModel.Syndication.SyndicationCategory Clone() { throw null; }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    public abstract partial class SyndicationContent
    {
        protected SyndicationContent() { }
        protected SyndicationContent(System.ServiceModel.Syndication.SyndicationContent source) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public abstract string Type { get; }
        public abstract System.ServiceModel.Syndication.SyndicationContent Clone();
        public static System.ServiceModel.Syndication.TextSyndicationContent CreateHtmlContent(string content) { throw null; }
        public static System.ServiceModel.Syndication.TextSyndicationContent CreatePlaintextContent(string content) { throw null; }
        public static System.ServiceModel.Syndication.UrlSyndicationContent CreateUrlContent(System.Uri url, string mediaType) { throw null; }
        public static System.ServiceModel.Syndication.TextSyndicationContent CreateXhtmlContent(string content) { throw null; }
        public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object dataContractObject) { throw null; }
        public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object dataContractObject, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { throw null; }
        public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object xmlSerializerObject, System.Xml.Serialization.XmlSerializer serializer) { throw null; }
        public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(System.Xml.XmlReader xmlReader) { throw null; }
        protected abstract void WriteContentsTo(System.Xml.XmlWriter writer);
        public void WriteTo(System.Xml.XmlWriter writer, string outerElementName, string outerElementNamespace) { }
    }
    public partial class SyndicationElementExtension
    {
        public SyndicationElementExtension(object dataContractExtension) { }
        public SyndicationElementExtension(object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { }
        public SyndicationElementExtension(object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { }
        public SyndicationElementExtension(string outerName, string outerNamespace, object dataContractExtension) { }
        public SyndicationElementExtension(string outerName, string outerNamespace, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { }
        public SyndicationElementExtension(System.Xml.XmlReader xmlReader) { }
        public string OuterName { get { throw null; } }
        public string OuterNamespace { get { throw null; } }
        public TExtension GetObject<TExtension>() { throw null; }
        public TExtension GetObject<TExtension>(System.Runtime.Serialization.XmlObjectSerializer serializer) { throw null; }
        public TExtension GetObject<TExtension>(System.Xml.Serialization.XmlSerializer serializer) { throw null; }
        public System.Xml.XmlReader GetReader() { throw null; }
        public void WriteTo(System.Xml.XmlWriter writer) { }
    }
    public sealed partial class SyndicationElementExtensionCollection : System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationElementExtension>
    {
        internal SyndicationElementExtensionCollection() { }
        public void Add(object extension) { }
        public void Add(object dataContractExtension, System.Runtime.Serialization.DataContractSerializer serializer) { }
        public void Add(object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { }
        public void Add(string outerName, string outerNamespace, object dataContractExtension) { }
        public void Add(string outerName, string outerNamespace, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { }
        public void Add(System.Xml.XmlReader xmlReader) { }
        protected override void ClearItems() { }
        public System.Xml.XmlReader GetReaderAtElementExtensions() { throw null; }
        protected override void InsertItem(int index, System.ServiceModel.Syndication.SyndicationElementExtension item) { }
        public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace) { throw null; }
        public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace, System.Runtime.Serialization.XmlObjectSerializer serializer) { throw null; }
        public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace, System.Xml.Serialization.XmlSerializer serializer) { throw null; }
        protected override void RemoveItem(int index) { }
        protected override void SetItem(int index, System.ServiceModel.Syndication.SyndicationElementExtension item) { }
    }
    public partial class SyndicationFeed
    {
        public SyndicationFeed() { }
        public SyndicationFeed(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { }
        protected SyndicationFeed(System.ServiceModel.Syndication.SyndicationFeed source, bool cloneItems) { }
        public SyndicationFeed(string title, string description, System.Uri feedAlternateLink) { }
        public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { }
        public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { }
        public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, string id, System.DateTimeOffset lastUpdatedTime, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Authors { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> Categories { get { throw null; } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Contributors { get { throw null; } }
        public System.ServiceModel.Syndication.TextSyndicationContent Copyright { get { throw null; } set { } }
        public System.ServiceModel.Syndication.TextSyndicationContent Description { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Generator { get { throw null; } set { } }
        public string Id { get { throw null; } set { } }
        public System.Uri ImageUrl { get { throw null; } set { } }
        public System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> Items { get { throw null; } set { } }
        public string Language { get { throw null; } set { } }
        public System.DateTimeOffset LastUpdatedTime { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationLink> Links { get { throw null; } }
        public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } }
        public virtual System.ServiceModel.Syndication.SyndicationFeed Clone(bool cloneItems) { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationCategory CreateCategory() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationItem CreateItem() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationLink CreateLink() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationPerson CreatePerson() { throw null; }
        public System.ServiceModel.Syndication.Atom10FeedFormatter GetAtom10Formatter() { throw null; }
        public System.ServiceModel.Syndication.Rss20FeedFormatter GetRss20Formatter() { throw null; }
        public System.ServiceModel.Syndication.Rss20FeedFormatter GetRss20Formatter(bool serializeExtensionsAsAtom) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationFeed Load(System.Xml.XmlReader reader) { throw null; }
        public static TSyndicationFeed Load<TSyndicationFeed>(System.Xml.XmlReader reader) where TSyndicationFeed : System.ServiceModel.Syndication.SyndicationFeed, new() { throw null; }
        public void SaveAsAtom10(System.Xml.XmlWriter writer) { }
        public void SaveAsRss20(System.Xml.XmlWriter writer) { }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    [System.Runtime.Serialization.DataContractAttribute]
    public abstract partial class SyndicationFeedFormatter
    {
        protected SyndicationFeedFormatter() { }
        protected SyndicationFeedFormatter(System.ServiceModel.Syndication.SyndicationFeed feedToWrite) { }
        public System.ServiceModel.Syndication.SyndicationFeed Feed { get { throw null; } }
        public abstract string Version { get; }
        public abstract bool CanRead(System.Xml.XmlReader reader);
        protected internal static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected internal static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected abstract System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance();
        protected internal static System.ServiceModel.Syndication.SyndicationItem CreateItem(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected internal static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected internal static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected internal static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; }
        protected internal static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, int maxExtensionSize) { }
        protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, int maxExtensionSize) { }
        protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, int maxExtensionSize) { }
        protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, int maxExtensionSize) { }
        protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, int maxExtensionSize) { }
        public abstract void ReadFrom(System.Xml.XmlReader reader);
        protected internal virtual void SetFeed(System.ServiceModel.Syndication.SyndicationFeed feed) { }
        public override string ToString() { throw null; }
        protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; }
        protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { throw null; }
        protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; }
        protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; }
        protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; }
        protected internal static bool TryParseContent(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string contentType, string version, out System.ServiceModel.Syndication.SyndicationContent content) { throw null; }
        protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; }
        protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { throw null; }
        protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; }
        protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; }
        protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; }
        protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { }
        protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { }
        protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { }
        protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { }
        protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { }
        protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { }
        protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { }
        protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { }
        protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { }
        protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { }
        public abstract void WriteTo(System.Xml.XmlWriter writer);
    }
    public partial class SyndicationItem
    {
        public SyndicationItem() { }
        protected SyndicationItem(System.ServiceModel.Syndication.SyndicationItem source) { }
        public SyndicationItem(string title, System.ServiceModel.Syndication.SyndicationContent content, System.Uri itemAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { }
        public SyndicationItem(string title, string content, System.Uri itemAlternateLink) { }
        public SyndicationItem(string title, string content, System.Uri itemAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Authors { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> Categories { get { throw null; } }
        public System.ServiceModel.Syndication.SyndicationContent Content { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Contributors { get { throw null; } }
        public System.ServiceModel.Syndication.TextSyndicationContent Copyright { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Id { get { throw null; } set { } }
        public System.DateTimeOffset LastUpdatedTime { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationLink> Links { get { throw null; } }
        public System.DateTimeOffset PublishDate { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationFeed SourceFeed { get { throw null; } set { } }
        public System.ServiceModel.Syndication.TextSyndicationContent Summary { get { throw null; } set { } }
        public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } }
        public void AddPermalink(System.Uri permalink) { }
        public virtual System.ServiceModel.Syndication.SyndicationItem Clone() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationCategory CreateCategory() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationLink CreateLink() { throw null; }
        protected internal virtual System.ServiceModel.Syndication.SyndicationPerson CreatePerson() { throw null; }
        public System.ServiceModel.Syndication.Atom10ItemFormatter GetAtom10Formatter() { throw null; }
        public System.ServiceModel.Syndication.Rss20ItemFormatter GetRss20Formatter() { throw null; }
        public System.ServiceModel.Syndication.Rss20ItemFormatter GetRss20Formatter(bool serializeExtensionsAsAtom) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationItem Load(System.Xml.XmlReader reader) { throw null; }
        public static TSyndicationItem Load<TSyndicationItem>(System.Xml.XmlReader reader) where TSyndicationItem : System.ServiceModel.Syndication.SyndicationItem, new() { throw null; }
        public void SaveAsAtom10(System.Xml.XmlWriter writer) { }
        public void SaveAsRss20(System.Xml.XmlWriter writer) { }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseContent(System.Xml.XmlReader reader, string contentType, string version, out System.ServiceModel.Syndication.SyndicationContent content) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    [System.Runtime.Serialization.DataContractAttribute]
    public abstract partial class SyndicationItemFormatter
    {
        protected SyndicationItemFormatter() { }
        protected SyndicationItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite) { }
        public System.ServiceModel.Syndication.SyndicationItem Item { get { throw null; } }
        public abstract string Version { get; }
        public abstract bool CanRead(System.Xml.XmlReader reader);
        protected static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected abstract System.ServiceModel.Syndication.SyndicationItem CreateItemInstance();
        protected static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationItem item) { throw null; }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, int maxExtensionSize) { }
        protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, int maxExtensionSize) { }
        public abstract void ReadFrom(System.Xml.XmlReader reader);
        protected internal virtual void SetItem(System.ServiceModel.Syndication.SyndicationItem item) { }
        public override string ToString() { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; }
        protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; }
        protected static bool TryParseContent(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string contentType, string version, out System.ServiceModel.Syndication.SyndicationContent content) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; }
        protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { }
        protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { }
        protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { }
        protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { }
        protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { }
        protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { }
        public abstract void WriteTo(System.Xml.XmlWriter writer);
    }
    public partial class SyndicationLink
    {
        public SyndicationLink() { }
        protected SyndicationLink(System.ServiceModel.Syndication.SyndicationLink source) { }
        public SyndicationLink(System.Uri uri) { }
        public SyndicationLink(System.Uri uri, string relationshipType, string title, string mediaType, long length) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public long Length { get { throw null; } set { } }
        public string MediaType { get { throw null; } set { } }
        public string RelationshipType { get { throw null; } set { } }
        public string Title { get { throw null; } set { } }
        public System.Uri Uri { get { throw null; } set { } }
        public virtual System.ServiceModel.Syndication.SyndicationLink Clone() { throw null; }
        public static System.ServiceModel.Syndication.SyndicationLink CreateAlternateLink(System.Uri uri) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationLink CreateAlternateLink(System.Uri uri, string mediaType) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationLink CreateMediaEnclosureLink(System.Uri uri, string mediaType, long length) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationLink CreateSelfLink(System.Uri uri) { throw null; }
        public static System.ServiceModel.Syndication.SyndicationLink CreateSelfLink(System.Uri uri, string mediaType) { throw null; }
        public System.Uri GetAbsoluteUri() { throw null; }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    public partial class SyndicationPerson
    {
        public SyndicationPerson() { }
        protected SyndicationPerson(System.ServiceModel.Syndication.SyndicationPerson source) { }
        public SyndicationPerson(string email) { }
        public SyndicationPerson(string email, string name, string uri) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public string Email { get { throw null; } set { } }
        public string Name { get { throw null; } set { } }
        public string Uri { get { throw null; } set { } }
        public virtual System.ServiceModel.Syndication.SyndicationPerson Clone() { throw null; }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    public static partial class SyndicationVersions
    {
        public const string Atom10 = "Atom10";
        public const string Rss20 = "Rss20";
    }
    public partial class TextSyndicationContent : System.ServiceModel.Syndication.SyndicationContent
    {
        protected TextSyndicationContent(System.ServiceModel.Syndication.TextSyndicationContent source) { }
        public TextSyndicationContent(string text) { }
        public TextSyndicationContent(string text, System.ServiceModel.Syndication.TextSyndicationContentKind textKind) { }
        public string Text { get { throw null; } }
        public override string Type { get { throw null; } }
        public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; }
        protected override void WriteContentsTo(System.Xml.XmlWriter writer) { }
    }
    public enum TextSyndicationContentKind
    {
        Plaintext = 0,
        Html = 1,
        XHtml = 2,
    }
    public partial class UrlSyndicationContent : System.ServiceModel.Syndication.SyndicationContent
    {
        protected UrlSyndicationContent(System.ServiceModel.Syndication.UrlSyndicationContent source) { }
        public UrlSyndicationContent(System.Uri url, string mediaType) { }
        public override string Type { get { throw null; } }
        public System.Uri Url { get { throw null; } }
        public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; }
        protected override void WriteContentsTo(System.Xml.XmlWriter writer) { }
    }
    public partial class Workspace
    {
        public Workspace() { }
        public Workspace(System.ServiceModel.Syndication.TextSyndicationContent title, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.ResourceCollectionInfo> collections) { }
        public Workspace(string title, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.ResourceCollectionInfo> collections) { }
        public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } }
        public System.Uri BaseUri { get { throw null; } set { } }
        public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.ResourceCollectionInfo> Collections { get { throw null; } }
        public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } }
        public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } }
        protected internal virtual System.ServiceModel.Syndication.ResourceCollectionInfo CreateResourceCollection() { throw null; }
        protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; }
        protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; }
        protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { }
        protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { }
    }
    public partial class XmlSyndicationContent : System.ServiceModel.Syndication.SyndicationContent
    {
        protected XmlSyndicationContent(System.ServiceModel.Syndication.XmlSyndicationContent source) { }
        public XmlSyndicationContent(string type, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { }
        public XmlSyndicationContent(string type, object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { }
        public XmlSyndicationContent(string type, System.ServiceModel.Syndication.SyndicationElementExtension extension) { }
        public XmlSyndicationContent(System.Xml.XmlReader reader) { }
        public System.ServiceModel.Syndication.SyndicationElementExtension Extension { get { throw null; } }
        public override string Type { get { throw null; } }
        public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; }
        public System.Xml.XmlDictionaryReader GetReaderAtContent() { throw null; }
        public TContent ReadContent<TContent>() { throw null; }
        public TContent ReadContent<TContent>(System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { throw null; }
        public TContent ReadContent<TContent>(System.Xml.Serialization.XmlSerializer serializer) { throw null; }
        protected override void WriteContentsTo(System.Xml.XmlWriter writer) { }
    }
}
public Rss20FeedFormatter(System.Type feedTypeToCreate) { } protected System.Type FeedType { get { throw null; } } public bool PreserveAttributeExtensions { get { throw null; } set { } } public bool PreserveElementExtensions { get { throw null; } set { } } public bool SerializeExtensionsAsAtom { get { throw null; } set { } } public override string Version { get { throw null; } } public override bool CanRead(System.Xml.XmlReader reader) { throw null; } protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; } public override void ReadFrom(System.Xml.XmlReader reader) { } protected virtual System.ServiceModel.Syndication.SyndicationItem ReadItem(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; } protected virtual System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> ReadItems(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, out bool areAllItemsRead) { throw null; } protected internal override void SetFeed(System.ServiceModel.Syndication.SyndicationFeed feed) { } System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; } void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { } void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { } protected virtual void WriteItem(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, System.Uri feedBaseUri) { } protected virtual void WriteItems(System.Xml.XmlWriter writer, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items, System.Uri feedBaseUri) { } public override void WriteTo(System.Xml.XmlWriter writer) { } } [System.Xml.Serialization.XmlRootAttribute(ElementName="rss", Namespace="")] public partial class Rss20FeedFormatter<TSyndicationFeed> : System.ServiceModel.Syndication.Rss20FeedFormatter where TSyndicationFeed : System.ServiceModel.Syndication.SyndicationFeed, new() { public Rss20FeedFormatter() { } public Rss20FeedFormatter(TSyndicationFeed feedToWrite) { } public Rss20FeedFormatter(TSyndicationFeed feedToWrite, bool serializeExtensionsAsAtom) { } protected override System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance() { throw null; } } [System.Xml.Serialization.XmlRootAttribute(ElementName="item", Namespace="")] public partial class Rss20ItemFormatter : System.ServiceModel.Syndication.SyndicationItemFormatter, System.Xml.Serialization.IXmlSerializable { public Rss20ItemFormatter() { } public Rss20ItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite) { } public Rss20ItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite, bool serializeExtensionsAsAtom) { } public Rss20ItemFormatter(System.Type itemTypeToCreate) { } protected System.Type ItemType { get { throw null; } } public bool PreserveAttributeExtensions { get { throw null; } set { } } public bool PreserveElementExtensions { get { throw null; } set { } } public bool SerializeExtensionsAsAtom { get { throw null; } set { } } public override string Version { get { throw null; } } public override bool CanRead(System.Xml.XmlReader reader) { throw null; } protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; } public override void ReadFrom(System.Xml.XmlReader reader) { } System.Xml.Schema.XmlSchema System.Xml.Serialization.IXmlSerializable.GetSchema() { throw null; } 
void System.Xml.Serialization.IXmlSerializable.ReadXml(System.Xml.XmlReader reader) { } void System.Xml.Serialization.IXmlSerializable.WriteXml(System.Xml.XmlWriter writer) { } public override void WriteTo(System.Xml.XmlWriter writer) { } } [System.Xml.Serialization.XmlRootAttribute(ElementName="item", Namespace="")] public partial class Rss20ItemFormatter<TSyndicationItem> : System.ServiceModel.Syndication.Rss20ItemFormatter, System.Xml.Serialization.IXmlSerializable where TSyndicationItem : System.ServiceModel.Syndication.SyndicationItem, new() { public Rss20ItemFormatter() { } public Rss20ItemFormatter(TSyndicationItem itemToWrite) { } public Rss20ItemFormatter(TSyndicationItem itemToWrite, bool serializeExtensionsAsAtom) { } protected override System.ServiceModel.Syndication.SyndicationItem CreateItemInstance() { throw null; } } public partial class ServiceDocument { public ServiceDocument() { } public ServiceDocument(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.Workspace> workspaces) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.Uri BaseUri { get { throw null; } set { } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public string Language { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.Workspace> Workspaces { get { throw null; } } protected internal virtual System.ServiceModel.Syndication.Workspace CreateWorkspace() { throw null; } public System.ServiceModel.Syndication.ServiceDocumentFormatter GetFormatter() { throw null; } public static System.ServiceModel.Syndication.ServiceDocument Load(System.Xml.XmlReader reader) { throw null; } public static TServiceDocument Load<TServiceDocument>(System.Xml.XmlReader reader) where TServiceDocument : System.ServiceModel.Syndication.ServiceDocument, new() { throw null; } public void Save(System.Xml.XmlWriter writer) { } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } [System.Runtime.Serialization.DataContractAttribute] public abstract partial class ServiceDocumentFormatter { protected ServiceDocumentFormatter() { } protected ServiceDocumentFormatter(System.ServiceModel.Syndication.ServiceDocument documentToWrite) { } public System.ServiceModel.Syndication.ServiceDocument Document { get { throw null; } } public abstract string Version { get; } public abstract bool CanRead(System.Xml.XmlReader reader); protected static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.InlineCategoriesDocument inlineCategories) { throw null; } protected static System.ServiceModel.Syndication.ResourceCollectionInfo CreateCollection(System.ServiceModel.Syndication.Workspace workspace) { throw null; } protected virtual System.ServiceModel.Syndication.ServiceDocument CreateDocumentInstance() { throw null; } protected static System.ServiceModel.Syndication.InlineCategoriesDocument CreateInlineCategories(System.ServiceModel.Syndication.ResourceCollectionInfo collection) { throw 
null; } protected static System.ServiceModel.Syndication.ReferencedCategoriesDocument CreateReferencedCategories(System.ServiceModel.Syndication.ResourceCollectionInfo collection) { throw null; } protected static System.ServiceModel.Syndication.Workspace CreateWorkspace(System.ServiceModel.Syndication.ServiceDocument document) { throw null; } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.CategoriesDocument categories, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ResourceCollectionInfo collection, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ServiceDocument document, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.Workspace workspace, int maxExtensionSize) { } public abstract void ReadFrom(System.Xml.XmlReader reader); protected virtual void SetDocument(System.ServiceModel.Syndication.ServiceDocument document) { } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.ServiceDocument document, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.Workspace workspace, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.ServiceDocument document, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.Workspace workspace, string version) { throw null; } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ServiceDocument document, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.Workspace workspace, string version) { } protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.CategoriesDocument categories, string version) { } protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ResourceCollectionInfo collection, string version) { } protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.ServiceDocument document, string version) { } 
protected static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.Workspace workspace, string version) { } public abstract void WriteTo(System.Xml.XmlWriter writer); } public partial class SyndicationCategory { public SyndicationCategory() { } protected SyndicationCategory(System.ServiceModel.Syndication.SyndicationCategory source) { } public SyndicationCategory(string name) { } public SyndicationCategory(string name, string scheme, string label) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public string Label { get { throw null; } set { } } public string Name { get { throw null; } set { } } public string Scheme { get { throw null; } set { } } public virtual System.ServiceModel.Syndication.SyndicationCategory Clone() { throw null; } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } public abstract partial class SyndicationContent { protected SyndicationContent() { } protected SyndicationContent(System.ServiceModel.Syndication.SyndicationContent source) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public abstract string Type { get; } public abstract System.ServiceModel.Syndication.SyndicationContent Clone(); public static System.ServiceModel.Syndication.TextSyndicationContent CreateHtmlContent(string content) { throw null; } public static System.ServiceModel.Syndication.TextSyndicationContent CreatePlaintextContent(string content) { throw null; } public static System.ServiceModel.Syndication.UrlSyndicationContent CreateUrlContent(System.Uri url, string mediaType) { throw null; } public static System.ServiceModel.Syndication.TextSyndicationContent CreateXhtmlContent(string content) { throw null; } public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object dataContractObject) { throw null; } public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object dataContractObject, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { throw null; } public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(object xmlSerializerObject, System.Xml.Serialization.XmlSerializer serializer) { throw null; } public static System.ServiceModel.Syndication.XmlSyndicationContent CreateXmlContent(System.Xml.XmlReader xmlReader) { throw null; } protected abstract void WriteContentsTo(System.Xml.XmlWriter writer); public void WriteTo(System.Xml.XmlWriter writer, string outerElementName, string outerElementNamespace) { } } public partial class SyndicationElementExtension { public SyndicationElementExtension(object dataContractExtension) { } public SyndicationElementExtension(object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { } public SyndicationElementExtension(object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { } public 
SyndicationElementExtension(string outerName, string outerNamespace, object dataContractExtension) { } public SyndicationElementExtension(string outerName, string outerNamespace, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { } public SyndicationElementExtension(System.Xml.XmlReader xmlReader) { } public string OuterName { get { throw null; } } public string OuterNamespace { get { throw null; } } public TExtension GetObject<TExtension>() { throw null; } public TExtension GetObject<TExtension>(System.Runtime.Serialization.XmlObjectSerializer serializer) { throw null; } public TExtension GetObject<TExtension>(System.Xml.Serialization.XmlSerializer serializer) { throw null; } public System.Xml.XmlReader GetReader() { throw null; } public void WriteTo(System.Xml.XmlWriter writer) { } } public sealed partial class SyndicationElementExtensionCollection : System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationElementExtension> { internal SyndicationElementExtensionCollection() { } public void Add(object extension) { } public void Add(object dataContractExtension, System.Runtime.Serialization.DataContractSerializer serializer) { } public void Add(object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { } public void Add(string outerName, string outerNamespace, object dataContractExtension) { } public void Add(string outerName, string outerNamespace, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { } public void Add(System.Xml.XmlReader xmlReader) { } protected override void ClearItems() { } public System.Xml.XmlReader GetReaderAtElementExtensions() { throw null; } protected override void InsertItem(int index, System.ServiceModel.Syndication.SyndicationElementExtension item) { } public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace) { throw null; } public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace, System.Runtime.Serialization.XmlObjectSerializer serializer) { throw null; } public System.Collections.ObjectModel.Collection<TExtension> ReadElementExtensions<TExtension>(string extensionName, string extensionNamespace, System.Xml.Serialization.XmlSerializer serializer) { throw null; } protected override void RemoveItem(int index) { } protected override void SetItem(int index, System.ServiceModel.Syndication.SyndicationElementExtension item) { } } public partial class SyndicationFeed { public SyndicationFeed() { } public SyndicationFeed(System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { } protected SyndicationFeed(System.ServiceModel.Syndication.SyndicationFeed source, bool cloneItems) { } public SyndicationFeed(string title, string description, System.Uri feedAlternateLink) { } public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { } public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { } public SyndicationFeed(string title, string description, System.Uri feedAlternateLink, string id, System.DateTimeOffset lastUpdatedTime, 
System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> items) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Authors { get { throw null; } } public System.Uri BaseUri { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> Categories { get { throw null; } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Contributors { get { throw null; } } public System.ServiceModel.Syndication.TextSyndicationContent Copyright { get { throw null; } set { } } public System.ServiceModel.Syndication.TextSyndicationContent Description { get { throw null; } set { } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public string Generator { get { throw null; } set { } } public string Id { get { throw null; } set { } } public System.Uri ImageUrl { get { throw null; } set { } } public System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.SyndicationItem> Items { get { throw null; } set { } } public string Language { get { throw null; } set { } } public System.DateTimeOffset LastUpdatedTime { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationLink> Links { get { throw null; } } public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } } public virtual System.ServiceModel.Syndication.SyndicationFeed Clone(bool cloneItems) { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationCategory CreateCategory() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationItem CreateItem() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationLink CreateLink() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationPerson CreatePerson() { throw null; } public System.ServiceModel.Syndication.Atom10FeedFormatter GetAtom10Formatter() { throw null; } public System.ServiceModel.Syndication.Rss20FeedFormatter GetRss20Formatter() { throw null; } public System.ServiceModel.Syndication.Rss20FeedFormatter GetRss20Formatter(bool serializeExtensionsAsAtom) { throw null; } public static System.ServiceModel.Syndication.SyndicationFeed Load(System.Xml.XmlReader reader) { throw null; } public static TSyndicationFeed Load<TSyndicationFeed>(System.Xml.XmlReader reader) where TSyndicationFeed : System.ServiceModel.Syndication.SyndicationFeed, new() { throw null; } public void SaveAsAtom10(System.Xml.XmlWriter writer) { } public void SaveAsRss20(System.Xml.XmlWriter writer) { } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } [System.Runtime.Serialization.DataContractAttribute] public abstract partial class SyndicationFeedFormatter { protected SyndicationFeedFormatter() { } protected 
SyndicationFeedFormatter(System.ServiceModel.Syndication.SyndicationFeed feedToWrite) { } public System.ServiceModel.Syndication.SyndicationFeed Feed { get { throw null; } } public abstract string Version { get; } public abstract bool CanRead(System.Xml.XmlReader reader); protected internal static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; } protected internal static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected abstract System.ServiceModel.Syndication.SyndicationFeed CreateFeedInstance(); protected internal static System.ServiceModel.Syndication.SyndicationItem CreateItem(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; } protected internal static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; } protected internal static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected internal static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationFeed feed) { throw null; } protected internal static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, int maxExtensionSize) { } protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, int maxExtensionSize) { } protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, int maxExtensionSize) { } protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, int maxExtensionSize) { } protected internal static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, int maxExtensionSize) { } public abstract void ReadFrom(System.Xml.XmlReader reader); protected internal virtual void SetFeed(System.ServiceModel.Syndication.SyndicationFeed feed) { } public override string ToString() { throw null; } protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; } protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { throw null; } protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; } protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; } protected internal static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; } protected internal static bool TryParseContent(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string contentType, string version, out 
System.ServiceModel.Syndication.SyndicationContent content) { throw null; } protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; } protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { throw null; } protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; } protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; } protected internal static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; } protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { } protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { } protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { } protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { } protected internal static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { } protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { } protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationFeed feed, string version) { } protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { } protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { } protected internal static void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { } public abstract void WriteTo(System.Xml.XmlWriter writer); } public partial class SyndicationItem { public SyndicationItem() { } protected SyndicationItem(System.ServiceModel.Syndication.SyndicationItem source) { } public SyndicationItem(string title, System.ServiceModel.Syndication.SyndicationContent content, System.Uri itemAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { } public SyndicationItem(string title, string content, System.Uri itemAlternateLink) { } public SyndicationItem(string title, string content, System.Uri itemAlternateLink, string id, System.DateTimeOffset lastUpdatedTime) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Authors { get { throw null; } } public System.Uri BaseUri { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationCategory> Categories { 
get { throw null; } } public System.ServiceModel.Syndication.SyndicationContent Content { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationPerson> Contributors { get { throw null; } } public System.ServiceModel.Syndication.TextSyndicationContent Copyright { get { throw null; } set { } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public string Id { get { throw null; } set { } } public System.DateTimeOffset LastUpdatedTime { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.SyndicationLink> Links { get { throw null; } } public System.DateTimeOffset PublishDate { get { throw null; } set { } } public System.ServiceModel.Syndication.SyndicationFeed SourceFeed { get { throw null; } set { } } public System.ServiceModel.Syndication.TextSyndicationContent Summary { get { throw null; } set { } } public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } } public void AddPermalink(System.Uri permalink) { } public virtual System.ServiceModel.Syndication.SyndicationItem Clone() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationCategory CreateCategory() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationLink CreateLink() { throw null; } protected internal virtual System.ServiceModel.Syndication.SyndicationPerson CreatePerson() { throw null; } public System.ServiceModel.Syndication.Atom10ItemFormatter GetAtom10Formatter() { throw null; } public System.ServiceModel.Syndication.Rss20ItemFormatter GetRss20Formatter() { throw null; } public System.ServiceModel.Syndication.Rss20ItemFormatter GetRss20Formatter(bool serializeExtensionsAsAtom) { throw null; } public static System.ServiceModel.Syndication.SyndicationItem Load(System.Xml.XmlReader reader) { throw null; } public static TSyndicationItem Load<TSyndicationItem>(System.Xml.XmlReader reader) where TSyndicationItem : System.ServiceModel.Syndication.SyndicationItem, new() { throw null; } public void SaveAsAtom10(System.Xml.XmlWriter writer) { } public void SaveAsRss20(System.Xml.XmlWriter writer) { } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseContent(System.Xml.XmlReader reader, string contentType, string version, out System.ServiceModel.Syndication.SyndicationContent content) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } [System.Runtime.Serialization.DataContractAttribute] public abstract partial class SyndicationItemFormatter { protected SyndicationItemFormatter() { } protected SyndicationItemFormatter(System.ServiceModel.Syndication.SyndicationItem itemToWrite) { } public System.ServiceModel.Syndication.SyndicationItem Item { get { throw null; } } public abstract string Version { get; } public abstract bool CanRead(System.Xml.XmlReader reader); protected static System.ServiceModel.Syndication.SyndicationCategory CreateCategory(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected abstract 
System.ServiceModel.Syndication.SyndicationItem CreateItemInstance(); protected static System.ServiceModel.Syndication.SyndicationLink CreateLink(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected static System.ServiceModel.Syndication.SyndicationPerson CreatePerson(System.ServiceModel.Syndication.SyndicationItem item) { throw null; } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, int maxExtensionSize) { } protected static void LoadElementExtensions(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, int maxExtensionSize) { } public abstract void ReadFrom(System.Xml.XmlReader reader); protected internal virtual void SetItem(System.ServiceModel.Syndication.SyndicationItem item) { } public override string ToString() { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; } protected static bool TryParseAttribute(string name, string ns, string value, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; } protected static bool TryParseContent(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string contentType, string version, out System.ServiceModel.Syndication.SyndicationContent content) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationCategory category, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationItem item, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationLink link, string version) { throw null; } protected static bool TryParseElement(System.Xml.XmlReader reader, System.ServiceModel.Syndication.SyndicationPerson person, string version) { throw null; } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { } protected static void WriteAttributeExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { } protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationCategory category, string version) { } protected static void WriteElementExtensions(System.Xml.XmlWriter 
writer, System.ServiceModel.Syndication.SyndicationItem item, string version) { } protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationLink link, string version) { } protected void WriteElementExtensions(System.Xml.XmlWriter writer, System.ServiceModel.Syndication.SyndicationPerson person, string version) { } public abstract void WriteTo(System.Xml.XmlWriter writer); } public partial class SyndicationLink { public SyndicationLink() { } protected SyndicationLink(System.ServiceModel.Syndication.SyndicationLink source) { } public SyndicationLink(System.Uri uri) { } public SyndicationLink(System.Uri uri, string relationshipType, string title, string mediaType, long length) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.Uri BaseUri { get { throw null; } set { } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public long Length { get { throw null; } set { } } public string MediaType { get { throw null; } set { } } public string RelationshipType { get { throw null; } set { } } public string Title { get { throw null; } set { } } public System.Uri Uri { get { throw null; } set { } } public virtual System.ServiceModel.Syndication.SyndicationLink Clone() { throw null; } public static System.ServiceModel.Syndication.SyndicationLink CreateAlternateLink(System.Uri uri) { throw null; } public static System.ServiceModel.Syndication.SyndicationLink CreateAlternateLink(System.Uri uri, string mediaType) { throw null; } public static System.ServiceModel.Syndication.SyndicationLink CreateMediaEnclosureLink(System.Uri uri, string mediaType, long length) { throw null; } public static System.ServiceModel.Syndication.SyndicationLink CreateSelfLink(System.Uri uri) { throw null; } public static System.ServiceModel.Syndication.SyndicationLink CreateSelfLink(System.Uri uri, string mediaType) { throw null; } public System.Uri GetAbsoluteUri() { throw null; } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } public partial class SyndicationPerson { public SyndicationPerson() { } protected SyndicationPerson(System.ServiceModel.Syndication.SyndicationPerson source) { } public SyndicationPerson(string email) { } public SyndicationPerson(string email, string name, string uri) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public string Email { get { throw null; } set { } } public string Name { get { throw null; } set { } } public string Uri { get { throw null; } set { } } public virtual System.ServiceModel.Syndication.SyndicationPerson Clone() { throw null; } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void 
WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } public static partial class SyndicationVersions { public const string Atom10 = "Atom10"; public const string Rss20 = "Rss20"; } public partial class TextSyndicationContent : System.ServiceModel.Syndication.SyndicationContent { protected TextSyndicationContent(System.ServiceModel.Syndication.TextSyndicationContent source) { } public TextSyndicationContent(string text) { } public TextSyndicationContent(string text, System.ServiceModel.Syndication.TextSyndicationContentKind textKind) { } public string Text { get { throw null; } } public override string Type { get { throw null; } } public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; } protected override void WriteContentsTo(System.Xml.XmlWriter writer) { } } public enum TextSyndicationContentKind { Plaintext = 0, Html = 1, XHtml = 2, } public partial class UrlSyndicationContent : System.ServiceModel.Syndication.SyndicationContent { protected UrlSyndicationContent(System.ServiceModel.Syndication.UrlSyndicationContent source) { } public UrlSyndicationContent(System.Uri url, string mediaType) { } public override string Type { get { throw null; } } public System.Uri Url { get { throw null; } } public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; } protected override void WriteContentsTo(System.Xml.XmlWriter writer) { } } public partial class Workspace { public Workspace() { } public Workspace(System.ServiceModel.Syndication.TextSyndicationContent title, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.ResourceCollectionInfo> collections) { } public Workspace(string title, System.Collections.Generic.IEnumerable<System.ServiceModel.Syndication.ResourceCollectionInfo> collections) { } public System.Collections.Generic.Dictionary<System.Xml.XmlQualifiedName, string> AttributeExtensions { get { throw null; } } public System.Uri BaseUri { get { throw null; } set { } } public System.Collections.ObjectModel.Collection<System.ServiceModel.Syndication.ResourceCollectionInfo> Collections { get { throw null; } } public System.ServiceModel.Syndication.SyndicationElementExtensionCollection ElementExtensions { get { throw null; } } public System.ServiceModel.Syndication.TextSyndicationContent Title { get { throw null; } set { } } protected internal virtual System.ServiceModel.Syndication.ResourceCollectionInfo CreateResourceCollection() { throw null; } protected internal virtual bool TryParseAttribute(string name, string ns, string value, string version) { throw null; } protected internal virtual bool TryParseElement(System.Xml.XmlReader reader, string version) { throw null; } protected internal virtual void WriteAttributeExtensions(System.Xml.XmlWriter writer, string version) { } protected internal virtual void WriteElementExtensions(System.Xml.XmlWriter writer, string version) { } } public partial class XmlSyndicationContent : System.ServiceModel.Syndication.SyndicationContent { protected XmlSyndicationContent(System.ServiceModel.Syndication.XmlSyndicationContent source) { } public XmlSyndicationContent(string type, object dataContractExtension, System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { } public XmlSyndicationContent(string type, object xmlSerializerExtension, System.Xml.Serialization.XmlSerializer serializer) { } public XmlSyndicationContent(string type, 
System.ServiceModel.Syndication.SyndicationElementExtension extension) { } public XmlSyndicationContent(System.Xml.XmlReader reader) { } public System.ServiceModel.Syndication.SyndicationElementExtension Extension { get { throw null; } } public override string Type { get { throw null; } } public override System.ServiceModel.Syndication.SyndicationContent Clone() { throw null; } public System.Xml.XmlDictionaryReader GetReaderAtContent() { throw null; } public TContent ReadContent<TContent>() { throw null; } public TContent ReadContent<TContent>(System.Runtime.Serialization.XmlObjectSerializer dataContractSerializer) { throw null; } public TContent ReadContent<TContent>(System.Xml.Serialization.XmlSerializer serializer) { throw null; } protected override void WriteContentsTo(System.Xml.XmlWriter writer) { } } }
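The reference listing above declares the core syndication surface: feeds, items, links, categories, the Atom 1.0 and RSS 2.0 formatters, and the AtomPub service/categories documents. As a quick orientation, a brief usage sketch follows (not part of the original file; it exercises only constructors and methods declared in the listing above):

// A short usage sketch of the surface above: build a feed with one item and
// serialize it with the Atom 1.0 formatter. RSS output would instead use
// feed.GetRss20Formatter(), also declared above.
using System;
using System.ServiceModel.Syndication;
using System.Xml;

class FeedExample
{
    static void Main()
    {
        var item = new SyndicationItem(
            "First post", "Hello, world.",
            new Uri("https://example.com/posts/1"),
            "posts/1", DateTimeOffset.UtcNow);

        var feed = new SyndicationFeed(
            "Example Feed", "A sample feed.",
            new Uri("https://example.com"), new[] { item });

        var settings = new XmlWriterSettings { Indent = true };
        using (XmlWriter writer = XmlWriter.Create(Console.Out, settings))
        {
            new Atom10FeedFormatter(feed).WriteTo(writer);
        }
    }
}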
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore when the block ends in a throw (see the sketch below). This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends that logic to also skip save/restore when the block ends in a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
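To make the optimization concrete, here is a minimal C# sketch (hypothetical, written for this record; it is not code from the PR) of the shape LSRA now handles better on ARM64. The upper halves of SIMD registers are caller-saved, so a Vector128 value live across a call used to get upper-vector save/restore inserted even when that call sits in a block that ends in a throw and never returns to the vector computation:

// Hypothetical example of a BBJ_THROW block with a live Vector128 value.
// Before the change, LSRA saved/restored the upper vector halves around the
// exception-constructor call below; afterwards it skips that work.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

static class ThrowBlockExample
{
    static Vector128<int> AddChecked(Vector128<int> a, Vector128<int> b, bool fail)
    {
        Vector128<int> sum = AdvSimd.Add(a, b); // vector value live across the branch

        if (fail)
        {
            // This block ends in a throw, so control never returns to use
            // 'sum'; saving/restoring upper vector halves around the ctor
            // call here is wasted work.
            throw new InvalidOperationException("vector add failed");
        }

        return sum;
    }
}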
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftRightArithmetic.Vector128.Int32.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightArithmetic_Vector128_Int32_1() { var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray, Int32[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Int32, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 testClass) { var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 testClass) { fixed (Vector128<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly byte Imm = 1; private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector128<Int32> _clsVar; private Vector128<Int32> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightArithmetic( Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightArithmetic( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); fixed (Vector128<Int32>* pFld = &test._fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, 
_dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightArithmetic(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightArithmetic)}<Int32>(Vector128<Int32>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightArithmetic_Vector128_Int32_1() { var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray, Int32[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Int32, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 testClass) { var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1 testClass) { fixed (Vector128<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly byte Imm = 1; private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector128<Int32> _clsVar; private Vector128<Int32> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightArithmetic( Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightArithmetic( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector128<Int32>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector128((Int32*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector128_Int32_1(); fixed (Vector128<Int32>* pFld = &test._fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, 
_dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector128((Int32*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightArithmetic(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightArithmetic)}<Int32>(Vector128<Int32>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. A minimal illustrative sketch of the targeted pattern follows this record. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/JIT/CodeGenBringUpTests/FPMul.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_FPMul { const int Pass = 100; const int Fail = -1; [MethodImplAttribute(MethodImplOptions.NoInlining)] public static float FPMul(float x, float y) { return x*y; } public static int Main() { float y = FPMul(7f, 9f); Console.WriteLine(y); if (System.Math.Abs(y-63f) <= Single.Epsilon) return Pass; else return Fail; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_FPMul { const int Pass = 100; const int Fail = -1; [MethodImplAttribute(MethodImplOptions.NoInlining)] public static float FPMul(float x, float y) { return x*y; } public static int Main() { float y = FPMul(7f, 9f); Console.WriteLine(y); if (System.Math.Abs(y-63f) <= Single.Epsilon) return Pass; else return Fail; } }
-1
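For context, a minimal sketch of the code pattern the PR description above targets: a basic block that keeps a large vector value live across a call but terminates in a throw. This is not part of the dataset record; the type and method names (VectorThrowExample, Helper) are hypothetical, and the comments restate the PR description's claim about JIT behavior rather than anything demonstrated by this code itself.

using System;
using System.Runtime.Intrinsics;

public static class VectorThrowExample
{
    // Hypothetical illustration: 'v' is live across the call to Helper(),
    // which would ordinarily make the register allocator save/restore the
    // upper halves of vector registers around the call. Because this block
    // ends by throwing (a BBJ_THROW block in JIT terms), the PR's change
    // treats that save/restore as unnecessary: control never returns
    // normally from the block.
    public static void ThrowWithVectorLive(Vector256<float> v, string message)
    {
        Helper();                                     // call with 'v' still live
        Console.WriteLine(v.GetElement(0));           // use of 'v' after the call
        throw new InvalidOperationException(message); // block terminator
    }

    private static void Helper() => Console.WriteLine("side effect");
}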
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/baseservices/RuntimeConfiguration/TestConfig.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.IO; using System.Linq; using System.Text; using System.Reflection; using System.Runtime; using Xunit; class TestConfig { const int Success = 100; const int Fail = 101; [Fact] [EnvVar("DOTNET_gcServer", "1")] static int Verify_ServerGC_Env_Enable(string[] _) { return GCSettings.IsServerGC ? Success : Fail; } [Fact] [ConfigProperty("DOTNET_gcServer", "0")] static int Verify_ServerGC_Env_Disable(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } [Fact] [ConfigProperty("System.GC.Server", "true")] static int Verify_ServerGC_Prop_Enable(string[] _) { return GCSettings.IsServerGC ? Success : Fail; } [Fact] [ConfigProperty("System.GC.Server", "false")] static int Verify_ServerGC_Prop_Disable(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } [Fact] [EnvVar("DOTNET_gcServer", "0")] [ConfigProperty("System.GC.Server", "true")] static int Verify_ServerGC_Env_Override_Prop(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } static int Main(string[] args) { if (args.Length == 0) { return RunTests(); } MethodInfo infos = typeof(TestConfig).GetMethod(args[0], BindingFlags.Static | BindingFlags.NonPublic | BindingFlags.Public); if (infos is null) { return Fail; } return (int)infos.Invoke(null, new object[] { args[1..] }); } [AttributeUsage(AttributeTargets.Method, AllowMultiple = true, Inherited = false)] class EnvVarAttribute : Attribute { public EnvVarAttribute(string name, string value) { Name = name; Value = value; } public string Name { get; init; } public string Value { get; init; } } [AttributeUsage(AttributeTargets.Method, AllowMultiple = true, Inherited = false)] class ConfigPropertyAttribute : Attribute { public ConfigPropertyAttribute(string name, string value) { Name = name; Value = value; } public string Name { get; init; } public string Value { get; init; } } static int RunTests() { string corerunPath = GetCorerunPath(); MethodInfo[] infos = typeof(TestConfig).GetMethods(BindingFlags.Static | BindingFlags.NonPublic | BindingFlags.Public); foreach (var mi in infos) { var factMaybe = mi.GetCustomAttributes(typeof(FactAttribute)); if (!factMaybe.Any()) { continue; } using Process process = new(); StringBuilder arguments = new(); var configProperties = mi.GetCustomAttributes(typeof(ConfigPropertyAttribute)); foreach (Attribute cp in configProperties) { ConfigPropertyAttribute configProp = (ConfigPropertyAttribute)cp; arguments.Append($"-p {configProp.Name}={configProp.Value} "); } arguments.Append($"\"{System.Reflection.Assembly.GetExecutingAssembly().Location}\" {mi.Name}"); process.StartInfo.FileName = corerunPath; process.StartInfo.Arguments = arguments.ToString(); var envVariables = mi.GetCustomAttributes(typeof(EnvVarAttribute)); foreach (string key in Environment.GetEnvironmentVariables().Keys) { process.StartInfo.EnvironmentVariables[key] = Environment.GetEnvironmentVariable(key); } Console.WriteLine($"Running: {process.StartInfo.Arguments}"); foreach (Attribute ev in envVariables) { EnvVarAttribute envVar = (EnvVarAttribute)ev; process.StartInfo.EnvironmentVariables[envVar.Name] = envVar.Value; Console.WriteLine($" set {envVar.Name}={envVar.Value}"); } process.Start(); process.WaitForExit(); if (process.ExitCode != Success) { Console.WriteLine($"Failed: {mi.Name}"); return process.ExitCode; } } return Success; } static string GetCorerunPath() { string corerunName = 
"corerun"; if (TestLibrary.Utilities.IsWindows) { corerunName += ".exe"; } return Path.Combine(Environment.GetEnvironmentVariable("CORE_ROOT"), corerunName); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.IO; using System.Linq; using System.Text; using System.Reflection; using System.Runtime; using Xunit; class TestConfig { const int Success = 100; const int Fail = 101; [Fact] [EnvVar("DOTNET_gcServer", "1")] static int Verify_ServerGC_Env_Enable(string[] _) { return GCSettings.IsServerGC ? Success : Fail; } [Fact] [ConfigProperty("DOTNET_gcServer", "0")] static int Verify_ServerGC_Env_Disable(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } [Fact] [ConfigProperty("System.GC.Server", "true")] static int Verify_ServerGC_Prop_Enable(string[] _) { return GCSettings.IsServerGC ? Success : Fail; } [Fact] [ConfigProperty("System.GC.Server", "false")] static int Verify_ServerGC_Prop_Disable(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } [Fact] [EnvVar("DOTNET_gcServer", "0")] [ConfigProperty("System.GC.Server", "true")] static int Verify_ServerGC_Env_Override_Prop(string[] _) { return GCSettings.IsServerGC ? Fail : Success; } static int Main(string[] args) { if (args.Length == 0) { return RunTests(); } MethodInfo infos = typeof(TestConfig).GetMethod(args[0], BindingFlags.Static | BindingFlags.NonPublic | BindingFlags.Public); if (infos is null) { return Fail; } return (int)infos.Invoke(null, new object[] { args[1..] }); } [AttributeUsage(AttributeTargets.Method, AllowMultiple = true, Inherited = false)] class EnvVarAttribute : Attribute { public EnvVarAttribute(string name, string value) { Name = name; Value = value; } public string Name { get; init; } public string Value { get; init; } } [AttributeUsage(AttributeTargets.Method, AllowMultiple = true, Inherited = false)] class ConfigPropertyAttribute : Attribute { public ConfigPropertyAttribute(string name, string value) { Name = name; Value = value; } public string Name { get; init; } public string Value { get; init; } } static int RunTests() { string corerunPath = GetCorerunPath(); MethodInfo[] infos = typeof(TestConfig).GetMethods(BindingFlags.Static | BindingFlags.NonPublic | BindingFlags.Public); foreach (var mi in infos) { var factMaybe = mi.GetCustomAttributes(typeof(FactAttribute)); if (!factMaybe.Any()) { continue; } using Process process = new(); StringBuilder arguments = new(); var configProperties = mi.GetCustomAttributes(typeof(ConfigPropertyAttribute)); foreach (Attribute cp in configProperties) { ConfigPropertyAttribute configProp = (ConfigPropertyAttribute)cp; arguments.Append($"-p {configProp.Name}={configProp.Value} "); } arguments.Append($"\"{System.Reflection.Assembly.GetExecutingAssembly().Location}\" {mi.Name}"); process.StartInfo.FileName = corerunPath; process.StartInfo.Arguments = arguments.ToString(); var envVariables = mi.GetCustomAttributes(typeof(EnvVarAttribute)); foreach (string key in Environment.GetEnvironmentVariables().Keys) { process.StartInfo.EnvironmentVariables[key] = Environment.GetEnvironmentVariable(key); } Console.WriteLine($"Running: {process.StartInfo.Arguments}"); foreach (Attribute ev in envVariables) { EnvVarAttribute envVar = (EnvVarAttribute)ev; process.StartInfo.EnvironmentVariables[envVar.Name] = envVar.Value; Console.WriteLine($" set {envVar.Name}={envVar.Value}"); } process.Start(); process.WaitForExit(); if (process.ExitCode != Success) { Console.WriteLine($"Failed: {mi.Name}"); return process.ExitCode; } } return Success; } static string GetCorerunPath() { string corerunName = 
"corerun"; if (TestLibrary.Utilities.IsWindows) { corerunName += ".exe"; } return Path.Combine(Environment.GetEnvironmentVariable("CORE_ROOT"), corerunName); } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.props
<Project> <PropertyGroup> <IncludeFallbacksInDepsFile>true</IncludeFallbacksInDepsFile> <GetSharedFrameworkFilesForReadyToRunDependsOn> AddRuntimeFilesToPackage; AddFrameworkFilesToPackage </GetSharedFrameworkFilesForReadyToRunDependsOn> <PublishReadyToRun Condition="'$(RuntimeFlavor)' != 'Mono'">true</PublishReadyToRun> <PublishReadyToRun Condition="'$(RuntimeFlavor)' == 'Mono'">false</PublishReadyToRun> <!-- Disable crossgen on NetBSD, illumos and Solaris for now. This can be revisited when we have full support. --> <PublishReadyToRun Condition="'$(TargetOS)'=='NetBSD' Or '$(TargetOS)'=='illumos' Or '$(TargetOS)'=='Solaris'">false</PublishReadyToRun> <!-- Disable crossgen on FreeBSD when cross building from Linux. --> <PublishReadyToRun Condition="'$(TargetOS)'=='FreeBSD' and '$(CrossBuild)'=='true'">false</PublishReadyToRun> <!-- These components are installed by the root shared framework, but not others. --> <IncludeWerRelatedKeys>true</IncludeWerRelatedKeys> <IncludeBreadcrumbStoreFolder>true</IncludeBreadcrumbStoreFolder> <MacOSPackageDescription>The .NET Shared Framework</MacOSPackageDescription> </PropertyGroup> <PropertyGroup Condition="'$(RuntimeFlavor)' == 'Mono' and '$(RuntimeFlavor)' != '$(PrimaryRuntimeFlavor)'"> <RuntimeSpecificFrameworkSuffix>Mono</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(MonoEnableLLVM)' == 'true' and '$(RuntimeFlavor)' == 'Mono' and '$(TargetsMobile)' != 'true' and '$(TargetsBrowser)' != 'true'"> <RuntimeSpecificFrameworkSuffix>Mono.LLVM</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(MonoBundleLLVMOptimizer)' == 'true' and '$(RuntimeFlavor)' == 'Mono' and '$(TargetsMobile)' != 'true' and '$(TargetsBrowser)' != 'true'"> <RuntimeSpecificFrameworkSuffix>Mono.LLVM.AOT</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(RuntimeSpecificFrameworkSuffix)' != ''"> <OverridePackageId>$(SharedFrameworkName).Runtime.$(RuntimeSpecificFrameworkSuffix).$(RuntimeIdentifier)</OverridePackageId> </PropertyGroup> <!-- hostpolicy and hostfxr aren't in the platform manifest in the ref pack and cannot be without breaking things upstack. We add the entries here to ensure that we don't fail the validation that every file included in the runtime pack is in the platform manifest without adding the entries to the manifest in the ref pack. --> <ItemGroup> <PlatformManifestFileEntry Include="hostpolicy.dll" IsNative="true" /> <PlatformManifestFileEntry Include="libhostpolicy.so" IsNative="true" /> <PlatformManifestFileEntry Include="libhostpolicy.dylib" IsNative="true" /> <PlatformManifestFileEntry Include="hostfxr.dll" IsNative="true" /> <PlatformManifestFileEntry Include="libhostfxr.so" IsNative="true" /> <PlatformManifestFileEntry Include="libhostfxr.dylib" IsNative="true" /> </ItemGroup> <Target Name="AddLinuxPackageInformation" BeforeTargets="GetDebInstallerJsonProperties;GetRpmInstallerJsonProperties"> <ItemGroup> <LinuxPackageDependency Include="dotnet-hostfxr-$(MajorVersion).$(MinorVersion);dotnet-runtime-deps-$(MajorVersion).$(MinorVersion)" Version="$(InstallerPackageVersion)" /> </ItemGroup> </Target> <!-- Mobile uses a different hosting model, so we don't include the .NET host components. 
--> <ItemGroup Condition="'$(TargetsMobile)' != 'true'"> <NativeRuntimeAsset Include="$(DotNetHostBinDir)/$(LibPrefix)hostpolicy$(LibSuffix)" /> <NativeRuntimeAsset Include="$(DotNetHostBinDir)/$(LibPrefix)hostfxr$(LibSuffix)" PackOnly="true" /> </ItemGroup> <Target Name="AddRuntimeFilesToPackage" DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"> <ItemGroup> <RuntimeFiles Condition="'%(RuntimeFiles.IsNative)' == 'true'"> <TargetPath>runtimes/$(RuntimeIdentifier)/native</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'%(FileName)' == 'crossgen'"> <TargetPath>tools</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMobile)' == 'true'" Include="@(MonoIncludeFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/include/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'" Include="@(FrameworkReleaseFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/Mono.release.framework/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'" Include="@(FrameworkDebugFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/Mono.debug.framework/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(RuntimeFlavor)' == 'mono'" Include="$(MonoArtifactsPath)\build\**\*.*" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/build/%(RecursiveDir)</TargetPath> </RuntimeFiles> <CoreCLRCrossTargetFiles PackOnly="true" /> <CoreCLRCrossTargetFiles Condition="'%(FileName)' == 'clrjit' or '%(FileName)' == 'libclrjit'"> <TargetPath>runtimes/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)/native</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="'%(FileName)' == 'crossgen'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="$([System.String]::new('%(FileName)').StartsWith('mscordaccore')) and '$(TargetsWindows)' == 'true'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="'%(FileName)%(Extension)' == 'mscordbi.dll' and '$(TargetsWindows)' == 'true'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLROptimizationFiles Include="$(CoreCLRArtifactsPath)StandardOptimizationData.mibc" Condition="Exists('$(CoreCLRArtifactsPath)StandardOptimizationData.mibc')"> <TargetPath>tools</TargetPath> </CoreCLROptimizationFiles> <ReferenceCopyLocalPaths Include="@(RuntimeFiles);@(CoreCLRCrossTargetFiles);@(CoreCLROptimizationFiles)" /> </ItemGroup> </Target> <Target Name="AddFrameworkFilesToPackage" DependsOnTargets="ResolveLibrariesFromLocalBuild"> <ItemGroup> <ReferenceCopyLocalPaths Include="@(LibrariesRuntimeFiles)" Condition="'%(LibrariesRuntimeFiles.Extension)' != '.a' or '$(TargetsMobile)' == 'true'"> <TargetPath Condition="'%(LibrariesRuntimeFiles.NativeSubDirectory)' != ''">runtimes/$(RuntimeIdentifier)/native/%(LibrariesRuntimeFiles.NativeSubDirectory)%(RecursiveDir)</TargetPath> </ReferenceCopyLocalPaths> </ItemGroup> </Target> <PropertyGroup Condition="'$(TargetOS)' == 'windows'"> <!-- DiaSymReader for the host architecture, which is used for 
[cross-]compilation --> <_diaSymArch>$(_hostArch)</_diaSymArch> <_diaSymReaderPath>$(PkgMicrosoft_DiaSymReader_Native)/runtimes/win/native/Microsoft.DiaSymReader.Native.$(_diaSymArch).dll</_diaSymReaderPath> <!-- DiaSymReader for the target architecture, which is placed into the package --> <_diaSymTargetArch>$(TargetArchitecture)</_diaSymTargetArch> <_diaSymTargetArch Condition="'$(TargetArchitecture)' == 'x64'">amd64</_diaSymTargetArch> <_diaSymReaderTargetArchPath>$(PkgMicrosoft_DiaSymReader_Native)/runtimes/win/native/Microsoft.DiaSymReader.Native.$(_diaSymTargetArch).dll</_diaSymReaderTargetArchPath> </PropertyGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows'"> <NativeRuntimeAsset Include="$(_diaSymReaderTargetArchPath)" /> <NativeRuntimeAsset Include="$(_diaSymReaderPath)" Condition="'$(CoreCLRCrossTargetComponentDirName)' != ''"> <TargetPath>runtimes/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)/native</TargetPath> </NativeRuntimeAsset> </ItemGroup> <!-- VS uses this file to show the target framework in the drop down. --> <Target Name="CreateDotVersionFile" DependsOnTargets="InitializeSourceControlInformationFromSourceControlManager" BeforeTargets="GetFilesToPublish" Condition="'$(DisableSourceLink)' != 'true'"> <ItemGroup> <_VersionFile Include="$(IntermediateOutputPath).version" TargetPath="shared/$(SharedFrameworkName)/$(Version)/" /> </ItemGroup> <WriteLinesToFile Lines="$(SourceRevisionId);$(Version)" File="@(_VersionFile)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <ItemGroup> <FilesToPublish Include="@(_VersionFile)" /> <FileWrites Include="@(_VersionFile)" /> </ItemGroup> </Target> <Import Project="$(Crossgen2SdkOverridePropsPath)" Condition="'$(Crossgen2SdkOverridePropsPath)' != ''" /> <Import Project="Sdk.targets" Sdk="Microsoft.NET.Sdk" /> <Import Project="Sdk.targets" Sdk="Microsoft.DotNet.SharedFramework.Sdk" /> <Import Project="$(Crossgen2SdkOverrideTargetsPath)" Condition="'$(Crossgen2SdkOverrideTargetsPath)' != ''" /> <PropertyGroup> <PublishReadyToRunComposite Condition="$(ForcePublishReadyToRunComposite) == 'true'">true</PublishReadyToRunComposite> </PropertyGroup> <!-- Put the mibc file into tools and not into PgoData, which will also hide it from being part of the RuntimeList.xml --> <Target Name="RemoveMibcFromRuntimeListXml" AfterTargets="GetFilesToPackage"> <ItemGroup> <FilesToPackageMibcData Include="@(FilesToPackage)" Condition="'%(FilesToPackage.Identity)' == '$(CoreCLRArtifactsPath)StandardOptimizationData.mibc'"> <TargetPath>tools</TargetPath> </FilesToPackageMibcData> <FilesToPackage Remove="$(CoreCLRArtifactsPath)StandardOptimizationData.mibc"/> <FilesToPackage Include="@(FilesToPackageMibcData)"/> </ItemGroup> </Target> </Project>
<Project> <PropertyGroup> <IncludeFallbacksInDepsFile>true</IncludeFallbacksInDepsFile> <GetSharedFrameworkFilesForReadyToRunDependsOn> AddRuntimeFilesToPackage; AddFrameworkFilesToPackage </GetSharedFrameworkFilesForReadyToRunDependsOn> <PublishReadyToRun Condition="'$(RuntimeFlavor)' != 'Mono'">true</PublishReadyToRun> <PublishReadyToRun Condition="'$(RuntimeFlavor)' == 'Mono'">false</PublishReadyToRun> <!-- Disable crossgen on NetBSD, illumos and Solaris for now. This can be revisited when we have full support. --> <PublishReadyToRun Condition="'$(TargetOS)'=='NetBSD' Or '$(TargetOS)'=='illumos' Or '$(TargetOS)'=='Solaris'">false</PublishReadyToRun> <!-- Disable crossgen on FreeBSD when cross building from Linux. --> <PublishReadyToRun Condition="'$(TargetOS)'=='FreeBSD' and '$(CrossBuild)'=='true'">false</PublishReadyToRun> <!-- These components are installed by the root shared framework, but not others. --> <IncludeWerRelatedKeys>true</IncludeWerRelatedKeys> <IncludeBreadcrumbStoreFolder>true</IncludeBreadcrumbStoreFolder> <MacOSPackageDescription>The .NET Shared Framework</MacOSPackageDescription> </PropertyGroup> <PropertyGroup Condition="'$(RuntimeFlavor)' == 'Mono' and '$(RuntimeFlavor)' != '$(PrimaryRuntimeFlavor)'"> <RuntimeSpecificFrameworkSuffix>Mono</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(MonoEnableLLVM)' == 'true' and '$(RuntimeFlavor)' == 'Mono' and '$(TargetsMobile)' != 'true' and '$(TargetsBrowser)' != 'true'"> <RuntimeSpecificFrameworkSuffix>Mono.LLVM</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(MonoBundleLLVMOptimizer)' == 'true' and '$(RuntimeFlavor)' == 'Mono' and '$(TargetsMobile)' != 'true' and '$(TargetsBrowser)' != 'true'"> <RuntimeSpecificFrameworkSuffix>Mono.LLVM.AOT</RuntimeSpecificFrameworkSuffix> </PropertyGroup> <PropertyGroup Condition="'$(RuntimeSpecificFrameworkSuffix)' != ''"> <OverridePackageId>$(SharedFrameworkName).Runtime.$(RuntimeSpecificFrameworkSuffix).$(RuntimeIdentifier)</OverridePackageId> </PropertyGroup> <!-- hostpolicy and hostfxr aren't in the platform manifest in the ref pack and cannot be without breaking things upstack. We add the entries here to ensure that we don't fail the validation that every file included in the runtime pack is in the platform manifest without adding the entries to the manifest in the ref pack. --> <ItemGroup> <PlatformManifestFileEntry Include="hostpolicy.dll" IsNative="true" /> <PlatformManifestFileEntry Include="libhostpolicy.so" IsNative="true" /> <PlatformManifestFileEntry Include="libhostpolicy.dylib" IsNative="true" /> <PlatformManifestFileEntry Include="hostfxr.dll" IsNative="true" /> <PlatformManifestFileEntry Include="libhostfxr.so" IsNative="true" /> <PlatformManifestFileEntry Include="libhostfxr.dylib" IsNative="true" /> </ItemGroup> <Target Name="AddLinuxPackageInformation" BeforeTargets="GetDebInstallerJsonProperties;GetRpmInstallerJsonProperties"> <ItemGroup> <LinuxPackageDependency Include="dotnet-hostfxr-$(MajorVersion).$(MinorVersion);dotnet-runtime-deps-$(MajorVersion).$(MinorVersion)" Version="$(InstallerPackageVersion)" /> </ItemGroup> </Target> <!-- Mobile uses a different hosting model, so we don't include the .NET host components. 
--> <ItemGroup Condition="'$(TargetsMobile)' != 'true'"> <NativeRuntimeAsset Include="$(DotNetHostBinDir)/$(LibPrefix)hostpolicy$(LibSuffix)" /> <NativeRuntimeAsset Include="$(DotNetHostBinDir)/$(LibPrefix)hostfxr$(LibSuffix)" PackOnly="true" /> </ItemGroup> <Target Name="AddRuntimeFilesToPackage" DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"> <ItemGroup> <RuntimeFiles Condition="'%(RuntimeFiles.IsNative)' == 'true'"> <TargetPath>runtimes/$(RuntimeIdentifier)/native</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'%(FileName)' == 'crossgen'"> <TargetPath>tools</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMobile)' == 'true'" Include="@(MonoIncludeFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/include/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'" Include="@(FrameworkReleaseFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/Mono.release.framework/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'" Include="@(FrameworkDebugFiles)" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/native/Mono.debug.framework/%(RecursiveDir)</TargetPath> </RuntimeFiles> <RuntimeFiles Condition="'$(RuntimeFlavor)' == 'mono'" Include="$(MonoArtifactsPath)\build\**\*.*" ExcludeFromDataFiles="true"> <TargetPath>runtimes/$(RuntimeIdentifier)/build/%(RecursiveDir)</TargetPath> </RuntimeFiles> <CoreCLRCrossTargetFiles PackOnly="true" /> <CoreCLRCrossTargetFiles Condition="'%(FileName)' == 'clrjit' or '%(FileName)' == 'libclrjit'"> <TargetPath>runtimes/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)/native</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="'%(FileName)' == 'crossgen'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="$([System.String]::new('%(FileName)').StartsWith('mscordaccore')) and '$(TargetsWindows)' == 'true'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLRCrossTargetFiles Condition="'%(FileName)%(Extension)' == 'mscordbi.dll' and '$(TargetsWindows)' == 'true'"> <TargetPath>tools/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)</TargetPath> </CoreCLRCrossTargetFiles> <CoreCLROptimizationFiles Include="$(CoreCLRArtifactsPath)StandardOptimizationData.mibc" Condition="Exists('$(CoreCLRArtifactsPath)StandardOptimizationData.mibc')"> <TargetPath>tools</TargetPath> </CoreCLROptimizationFiles> <ReferenceCopyLocalPaths Include="@(RuntimeFiles);@(CoreCLRCrossTargetFiles);@(CoreCLROptimizationFiles)" /> </ItemGroup> </Target> <Target Name="AddFrameworkFilesToPackage" DependsOnTargets="ResolveLibrariesFromLocalBuild"> <ItemGroup> <ReferenceCopyLocalPaths Include="@(LibrariesRuntimeFiles)" Condition="'%(LibrariesRuntimeFiles.Extension)' != '.a' or '$(TargetsMobile)' == 'true'"> <TargetPath Condition="'%(LibrariesRuntimeFiles.NativeSubDirectory)' != ''">runtimes/$(RuntimeIdentifier)/native/%(LibrariesRuntimeFiles.NativeSubDirectory)%(RecursiveDir)</TargetPath> </ReferenceCopyLocalPaths> </ItemGroup> </Target> <PropertyGroup Condition="'$(TargetOS)' == 'windows'"> <!-- DiaSymReader for the host architecture, which is used for 
[cross-]compilation --> <_diaSymArch>$(_hostArch)</_diaSymArch> <_diaSymReaderPath>$(PkgMicrosoft_DiaSymReader_Native)/runtimes/win/native/Microsoft.DiaSymReader.Native.$(_diaSymArch).dll</_diaSymReaderPath> <!-- DiaSymReader for the target architecture, which is placed into the package --> <_diaSymTargetArch>$(TargetArchitecture)</_diaSymTargetArch> <_diaSymTargetArch Condition="'$(TargetArchitecture)' == 'x64'">amd64</_diaSymTargetArch> <_diaSymReaderTargetArchPath>$(PkgMicrosoft_DiaSymReader_Native)/runtimes/win/native/Microsoft.DiaSymReader.Native.$(_diaSymTargetArch).dll</_diaSymReaderTargetArchPath> </PropertyGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows'"> <NativeRuntimeAsset Include="$(_diaSymReaderTargetArchPath)" /> <NativeRuntimeAsset Include="$(_diaSymReaderPath)" Condition="'$(CoreCLRCrossTargetComponentDirName)' != ''"> <TargetPath>runtimes/$(CoreCLRCrossTargetComponentDirName)_$(TargetArchitecture)/native</TargetPath> </NativeRuntimeAsset> </ItemGroup> <!-- VS uses this file to show the target framework in the drop down. --> <Target Name="CreateDotVersionFile" DependsOnTargets="InitializeSourceControlInformationFromSourceControlManager" BeforeTargets="GetFilesToPublish" Condition="'$(DisableSourceLink)' != 'true'"> <ItemGroup> <_VersionFile Include="$(IntermediateOutputPath).version" TargetPath="shared/$(SharedFrameworkName)/$(Version)/" /> </ItemGroup> <WriteLinesToFile Lines="$(SourceRevisionId);$(Version)" File="@(_VersionFile)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <ItemGroup> <FilesToPublish Include="@(_VersionFile)" /> <FileWrites Include="@(_VersionFile)" /> </ItemGroup> </Target> <Import Project="$(Crossgen2SdkOverridePropsPath)" Condition="'$(Crossgen2SdkOverridePropsPath)' != ''" /> <Import Project="Sdk.targets" Sdk="Microsoft.NET.Sdk" /> <Import Project="Sdk.targets" Sdk="Microsoft.DotNet.SharedFramework.Sdk" /> <Import Project="$(Crossgen2SdkOverrideTargetsPath)" Condition="'$(Crossgen2SdkOverrideTargetsPath)' != ''" /> <PropertyGroup> <PublishReadyToRunComposite Condition="$(ForcePublishReadyToRunComposite) == 'true'">true</PublishReadyToRunComposite> </PropertyGroup> <!-- Put the mibc file into tools and not into PgoData, which will also hide it from being part of the RuntimeList.xml --> <Target Name="RemoveMibcFromRuntimeListXml" AfterTargets="GetFilesToPackage"> <ItemGroup> <FilesToPackageMibcData Include="@(FilesToPackage)" Condition="'%(FilesToPackage.Identity)' == '$(CoreCLRArtifactsPath)StandardOptimizationData.mibc'"> <TargetPath>tools</TargetPath> </FilesToPackageMibcData> <FilesToPackage Remove="$(CoreCLRArtifactsPath)StandardOptimizationData.mibc"/> <FilesToPackage Include="@(FilesToPackageMibcData)"/> </ItemGroup> </Target> </Project>
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/JIT/Directed/StructPromote/SP1d.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System.Runtime.CompilerServices; using System; class SP1d { // Struct in reg (2 ints) struct S { public int i0; public int i1; } [MethodImpl(MethodImplOptions.NoInlining)] static int Foo(int i, int j, int k, int k2, int k3, S s) { return 1000000 * s.i0 + 100000 * s.i1 + 10000 * i + 1000 * j + 100 * k + 10 * k2 + k3; } [MethodImpl(MethodImplOptions.NoInlining)] static int M(int i0, int i1, int i2, int i3, int i4, int i5, int i6) { S s; s.i0 = i3; s.i1 = i2; return Foo(i1, i0, i4, i5, i6, s); // r0 <= r1; r1 <= r0; r2 <= inarg[0]; r3 <= inarg[4]; // outarg[0] <= inarg[8]; outarg[4] <= r3; outarg[8] <= r2 } public static int Main(String[] args) { int res = M(4, 5, 6, 7, 3, 2, 1); Console.WriteLine("M(4, 5, 6, 7, 3, 2, 1) is {0}.", res); if (res == 7654321) return 100; else return 99; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System.Runtime.CompilerServices; using System; class SP1d { // Struct in reg (2 ints) struct S { public int i0; public int i1; } [MethodImpl(MethodImplOptions.NoInlining)] static int Foo(int i, int j, int k, int k2, int k3, S s) { return 1000000 * s.i0 + 100000 * s.i1 + 10000 * i + 1000 * j + 100 * k + 10 * k2 + k3; } [MethodImpl(MethodImplOptions.NoInlining)] static int M(int i0, int i1, int i2, int i3, int i4, int i5, int i6) { S s; s.i0 = i3; s.i1 = i2; return Foo(i1, i0, i4, i5, i6, s); // r0 <= r1; r1 <= r0; r2 <= inarg[0]; r3 <= inarg[4]; // outarg[0] <= inarg[8]; outarg[4] <= r3; outarg[8] <= r2 } public static int Main(String[] args) { int res = M(4, 5, 6, 7, 3, 2, 1); Console.WriteLine("M(4, 5, 6, 7, 3, 2, 1) is {0}.", res); if (res == 7654321) return 100; else return 99; } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/Loader/classloader/generics/Instantiation/Positive/MultipleInterface14.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; public interface IGenX<T> { string m(T t); } public interface IGenY<T> { string m(T tArr); } struct Gen<T,U> : IGenX<T>, IGenY<U> { string IGenX<T>.m(T t) { return "IGenX.m"; } string IGenY<U>.m(U tArr) { return "IGenY.m"; } } public class Test_MultipleInterface14 { public static int counter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { Gen<int,int> GenIntInt = new Gen<int,int>(); Eval(((IGenX<int>)GenIntInt).m(5).Equals("IGenX.m")); Eval(((IGenY<int>)GenIntInt).m(5).Equals("IGenY.m")); Gen<int,string> GenIntString = new Gen<int,string>(); Eval(((IGenX<int>)GenIntString).m(5).Equals("IGenX.m")); Eval(((IGenY<string>)GenIntString).m("S").Equals("IGenY.m")); Gen<string,int> GenStringInt = new Gen<string,int>(); Eval(((IGenX<string>)GenStringInt).m("S").Equals("IGenX.m")); Eval(((IGenY<int>)GenStringInt).m(5).Equals("IGenY.m")); Gen<string,string> GenStringString = new Gen<string,string>(); Eval(((IGenX<string>)GenStringString).m("S").Equals("IGenX.m")); Eval(((IGenY<string>)GenStringString).m("S").Equals("IGenY.m")); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

public interface IGenX<T>
{
    string m(T t);
}

public interface IGenY<T>
{
    string m(T tArr);
}

struct Gen<T,U> : IGenX<T>, IGenY<U>
{
    string IGenX<T>.m(T t)
    {
        return "IGenX.m";
    }
    string IGenY<U>.m(U tArr)
    {
        return "IGenY.m";
    }
}

public class Test_MultipleInterface14
{
    public static int counter = 0;
    public static bool result = true;

    public static void Eval(bool exp)
    {
        counter++;
        if (!exp)
        {
            result = exp;
            Console.WriteLine("Test Failed at location: " + counter);
        }
    }

    public static int Main()
    {
        Gen<int,int> GenIntInt = new Gen<int,int>();
        Eval(((IGenX<int>)GenIntInt).m(5).Equals("IGenX.m"));
        Eval(((IGenY<int>)GenIntInt).m(5).Equals("IGenY.m"));

        Gen<int,string> GenIntString = new Gen<int,string>();
        Eval(((IGenX<int>)GenIntString).m(5).Equals("IGenX.m"));
        Eval(((IGenY<string>)GenIntString).m("S").Equals("IGenY.m"));

        Gen<string,int> GenStringInt = new Gen<string,int>();
        Eval(((IGenX<string>)GenStringInt).m("S").Equals("IGenX.m"));
        Eval(((IGenY<int>)GenStringInt).m(5).Equals("IGenY.m"));

        Gen<string,string> GenStringString = new Gen<string,string>();
        Eval(((IGenX<string>)GenStringString).m("S").Equals("IGenX.m"));
        Eval(((IGenY<string>)GenStringString).m("S").Equals("IGenY.m"));

        if (result)
        {
            Console.WriteLine("Test Passed");
            return 100;
        }
        else
        {
            Console.WriteLine("Test Failed");
            return 1;
        }
    }
}
-1
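The MultipleInterface14 test in the record above depends on explicit interface implementation: Gen<T,U> provides two bodies for m, and the compile-time interface type of the receiver selects which one runs. A minimal sketch of that dispatch rule, reusing the test's own types (illustration only):

// Illustration only: explicit interface members are not visible on the
// struct itself; they are reachable only through the interface type.
Gen<int, string> g = new Gen<int, string>();
// g.m(5);                               // would not compile: Gen exposes no public m
string viaX = ((IGenX<int>)g).m(5);      // returns "IGenX.m" (boxes g, dispatches via IGenX<int>)
string viaY = ((IGenY<string>)g).m("S"); // returns "IGenY.m" (boxes g, dispatches via IGenY<string>)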
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/JIT/jit64/gc/regress/vswhidbey/339415.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
    <Optimize>False</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="339415.cs" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
    <Optimize>False</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="339415.cs" />
  </ItemGroup>
</Project>
-1
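The PR description repeated in these records concerns RyuJIT's register allocator, which is C++ code. Purely as a hedged illustration of the decision the PR adds (none of these names are the actual JIT API; BBJ_THROW is the only identifier taken from the description), the skip condition amounts to:

// Hypothetical sketch, not RyuJIT source: all names here are illustrative;
// only BBJ_THROW comes from the PR description above.
class UpperVectorSkipSketch
{
    enum BlockEnd { Return, Throw /* BBJ_THROW */, Other }

    static bool SkipUpperVectorSaveRestore(bool callDoesNotReturn, BlockEnd blockEnd)
    {
        // dotnet/runtime#62662 already skipped save/restore for calls that do not return.
        if (callDoesNotReturn)
            return true;

        // This PR extends the skip to blocks that end in a throw (BBJ_THROW),
        // since the upper-vector state is dead once the block throws.
        return blockEnd == BlockEnd.Throw;
    }
}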
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest451/Generated451.il
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }

//TYPES IN FORWARDER ASSEMBLIES:

//TEST ASSEMBLY:
.assembly Generated451 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}

.class public BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void [mscorlib]System.Object::.ctor()
    ret
  }
}

.class public BaseClass1 extends BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void BaseClass0::.ctor()
    ret
  }
}

.class public sequential sealed MyStruct501`2<T0, T1>
    extends [mscorlib]System.ValueType
    implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase2`2<class BaseClass1,class BaseClass0>
{
  .pack 0
  .size 1

  .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::Method7.3906<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,class BaseClass1>.Method7'<M0>() cil managed noinlining
  {
    .override method instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<[1]>()
    ldstr "MyStruct501::Method7.MI.3907<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,class BaseClass0>.Method7'<M0>() cil managed noinlining
  {
    .override method instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<[1]>()
    ldstr "MyStruct501::Method7.MI.3909<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot instance string ClassMethod992() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod992.3910()"
    ret
  }

  .method public hidebysig newslot instance string ClassMethod993<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod993.3911<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot instance string ClassMethod994<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod994.3912<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig virtual instance bool Equals(object obj) cil managed {
ldc.i4.0 ret } .method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret } .method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated451 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.T.T<T0,T1,(valuetype MyStruct501`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.T.T<T0,T1,(valuetype MyStruct501`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.T<T1,(valuetype MyStruct501`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.T<T1,(valuetype MyStruct501`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
valuetype MyStruct501`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.A<(valuetype MyStruct501`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.A<(valuetype MyStruct501`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.B<(valuetype MyStruct501`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.B<(valuetype MyStruct501`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.T<T1,(valuetype MyStruct501`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.T<T1,(valuetype MyStruct501`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.A<(valuetype MyStruct501`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.A<(valuetype MyStruct501`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
valuetype MyStruct501`2<class BaseClass1,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.B<(valuetype MyStruct501`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.B<(valuetype MyStruct501`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_1) ldloca V_1 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloca V_1 dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ToString() pop pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class 
IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_2) ldloca V_2 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloca V_2 dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ToString() pop pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class 
BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_3) ldloca V_3 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloca V_3 dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ToString() pop pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr 
"MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_4) ldloca V_4 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloca V_4 dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ToString() pop pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr 
"class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_5) ldloca V_5 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_5 ldstr 
"MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_6) ldloca V_6 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV12 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV13 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV14 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV15 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV16 } 
catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV17 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV18 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV19 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV20 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV21 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV22 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV23 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_7) ldloca V_7 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV24 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV24} LV24: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV25 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV25} LV25: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV26 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV26} LV26: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV27 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV27} LV27: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void 
Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV28 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV28} LV28: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV29 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV29} LV29: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV30 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV30} LV30: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV31 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV31} LV31: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV32 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV32} LV32: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV33 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV33} LV33: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV34 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV34} LV34: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV35 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV35} LV35: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_8) ldloca V_8 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV36 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV36} LV36: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV37 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV37} LV37: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV38 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV38} LV38: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV39 } catch 
[mscorlib]System.Security.VerificationException { pop leave.s LV39} LV39: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV40 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV40} LV40: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV41 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV41} LV41: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV42 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV42} LV42: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV43 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV43} LV43: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV44 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV44} LV44: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV45 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV45} LV45: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV46 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV46} LV46: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV47 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV47} LV47: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_9) ldloca V_9 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class 
BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_10) ldloca V_10 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_11) ldloca V_11 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_12) ldloca V_12 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.T<class 
BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_13) ldloca V_13 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldnull ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_13 box 
valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_14) ldloca V_14 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype 
MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldnull ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class 
IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_15) ldloca V_15 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldnull ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_15 box valuetype 
MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ToString() calli default string(object) pop ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_16) ldloca V_16 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class 
BaseClass1,class BaseClass1>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldnull ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ToString() calli default string(object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class 
BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated451::MethodCallingTest() call void Generated451::ConstrainedCallsTest() call void Generated451::StructConstrainedInterfaceCallsTest() call void Generated451::CalliTest() ldc.i4 100 ret } }
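// ----------------------------------------------------------------------------
// Summary of the generated suite above: MyStruct501`2 implements the variant
// interface IBase2`2<+T0, -T1> twice and reroutes both slots through
// MethodImpl overrides. The four drivers (MethodCallingTest,
// ConstrainedCallsTest, StructConstrainedInterfaceCallsTest, CalliTest) each
// exercise a different call shape (direct call, "constrained." callvirt,
// struct-constrained generic call, ldvirtftn + calli on a boxed copy) and all
// expect the overrides, never the public Method7.3906: per the expected
// strings in this file, any IBase2 view with T1 = BaseClass1 must yield
// Method7.MI.3907, and any view with T1 = BaseClass0 must yield
// Method7.MI.3909. A rough C# analog of the type under test (illustrative
// only; the IL itself is authoritative):
//
//   interface IBase2<out T0, in T1> { string Method7<M0>(); }
//   struct MyStruct501<T0, T1> : IBase2<BaseClass1, BaseClass1>,
//                                IBase2<BaseClass1, BaseClass0>
//   {
//       public string Method7<M0>() => "MyStruct501::Method7.3906<...>()";
//       string IBase2<BaseClass1, BaseClass1>.Method7<M0>()
//           => "MyStruct501::Method7.MI.3907<...>()";
//       string IBase2<BaseClass1, BaseClass0>.Method7<M0>()
//           => "MyStruct501::Method7.MI.3909<...>()";
//   }
//
// Boxing plus covariance on T0 lets a single instance satisfy all four
// IBase2<BaseClass0/1, BaseClass0/1> views exercised above, and the suite
// pins down which explicit implementation each view must select.
// ----------------------------------------------------------------------------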
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib
{
  .publickeytoken = (B7 7A 5C 56 19 34 E0 89 )
  .ver 4:0:0:0
}
.assembly extern TestFramework
{
  .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A )
}

//TYPES IN FORWARDER ASSEMBLIES:

//TEST ASSEMBLY:
.assembly Generated451
{
  .hash algorithm 0x00008004
}
.assembly extern xunit.core {}

.class public BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void [mscorlib]System.Object::.ctor()
    ret
  }
}

.class public BaseClass1 extends BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void BaseClass0::.ctor()
    ret
  }
}

.class public sequential sealed MyStruct501`2<T0, T1>
       extends [mscorlib]System.ValueType
       implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase2`2<class BaseClass1,class BaseClass0>
{
  .pack 0
  .size 1

  .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::Method7.3906<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,class BaseClass1>.Method7'<M0>() cil managed noinlining
  {
    .override method instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<[1]>()
    ldstr "MyStruct501::Method7.MI.3907<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,class BaseClass0>.Method7'<M0>() cil managed noinlining
  {
    .override method instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<[1]>()
    ldstr "MyStruct501::Method7.MI.3909<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot instance string ClassMethod992() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod992.3910()"
    ret
  }

  .method public hidebysig newslot instance string ClassMethod993<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod993.3911<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig newslot instance string ClassMethod994<M0>() cil managed noinlining
  {
    ldstr "MyStruct501::ClassMethod994.3912<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }

  .method public hidebysig virtual instance bool Equals(object obj) cil managed
  {
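    // Generated object-method overrides: Equals, GetHashCode and ToString
    // below return fixed placeholder values (false, 0, ""); the test drivers
    // call them and pop the results, so only the Method7/ClassMethod99x
    // strings are actually asserted.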
    ldc.i4.0
    ret
  }

  .method public hidebysig virtual instance int32 GetHashCode() cil managed
  {
    ldc.i4.0
    ret
  }

  .method public hidebysig virtual instance string ToString() cil managed
  {
    ldstr ""
    ret
  }
}

.class interface public abstract IBase2`2<+T0, -T1>
{
  .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}

.class public auto ansi beforefieldinit Generated451
{
  .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed
  {
    .maxstack 5
    .locals init (string[] actualResults)
    ldc.i4.s 0
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
    ldc.i4.s 0
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }

  .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed
  {
    .maxstack 5
    .locals init (string[] actualResults)
    ldc.i4.s 0
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
    ldc.i4.s 0
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }

  .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }

  .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }

  .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained. !!W
    callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
    stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }

  .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 6
    .locals init (string[] actualResults)
    ldc.i4.s 1
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 1
    ldloc.s actualResults
    ldc.i4.s 0
    ldarga.s 0
    constrained.
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.T.T<T0,T1,(valuetype MyStruct501`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.T.T<T0,T1,(valuetype MyStruct501`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.T<T1,(valuetype MyStruct501`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.T<T1,(valuetype MyStruct501`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
valuetype MyStruct501`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.A<(valuetype MyStruct501`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.A<(valuetype MyStruct501`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.A.B<(valuetype MyStruct501`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.A.B<(valuetype MyStruct501`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.T<T1,(valuetype MyStruct501`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.T<T1,(valuetype MyStruct501`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.A<(valuetype MyStruct501`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.A<(valuetype MyStruct501`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
valuetype MyStruct501`2<class BaseClass1,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct501.B.B<(valuetype MyStruct501`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct501.B.B<(valuetype MyStruct501`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct501`2<class BaseClass1,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_1) ldloca V_1 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloca V_1 dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ToString() pop pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class 
IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_2) ldloca V_2 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloca V_2 dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ToString() pop pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class 
BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_3) ldloca V_3 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloca V_3 dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ToString() pop pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr 
"MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_3 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_4) ldloca V_4 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloca V_4 dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod992() ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod993<object>() ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod994<object>() ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type MyStruct501" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ToString() pop pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr 
"class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_4 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_5) ldloca V_5 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_5 ldstr 
"MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_5 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_6) ldloca V_6 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV12 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV13 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV14 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV15 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV16 } 
catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV17 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV18 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV19 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV20 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV21 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV22 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22: .try { ldloc V_6 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV23 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_7) ldloca V_7 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV24 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV24} LV24: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV25 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV25} LV25: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV26 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV26} LV26: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV27 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV27} LV27: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void 
Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV28 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV28} LV28: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV29 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV29} LV29: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV30 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV30} LV30: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV31 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV31} LV31: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV32 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV32} LV32: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV33 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV33} LV33: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV34 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV34} LV34: .try { ldloc V_7 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV35 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV35} LV35: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_8) ldloca V_8 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV36 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV36} LV36: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV37 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV37} LV37: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV38 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV38} LV38: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV39 } catch 
[mscorlib]System.Security.VerificationException { pop leave.s LV39} LV39: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV40 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV40} LV40: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV41 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV41} LV41: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV42 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV42} LV42: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV43 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV43} LV43: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" call void Generated451::M.IBase2.A.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV44 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV44} LV44: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV45 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV45} LV45: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV46 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV46} LV46: .try { ldloc V_8 ldstr "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.IBase2.A.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV47 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV47} LV47: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_9) ldloca V_9 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass0,class 
BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_9 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.A<valuetype MyStruct501`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_10) ldloca V_10 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.T<class BaseClass1,valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_10 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.A.B<valuetype MyStruct501`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_11) ldloca V_11 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.T<class BaseClass0,valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_11 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.A<valuetype MyStruct501`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_12) ldloca V_12 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.T<class 
BaseClass1,valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_12 ldstr "MyStruct501::Method7.MI.3907<System.Object>()#" + "MyStruct501::Method7.MI.3909<System.Object>()#" call void Generated451::M.MyStruct501.B.B<valuetype MyStruct501`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass0> V_13) ldloca V_13 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldnull ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_13 box 
valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct501`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass0,class BaseClass1> V_14) ldloca V_14 initobj valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype 
MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldnull ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class 
IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct501`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass0> V_15) ldloca V_15 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldnull ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_15 box valuetype 
MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass0>::ToString() calli default string(object) pop ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldloc V_15 box valuetype MyStruct501`2<class BaseClass1,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct501`2<class BaseClass1,class BaseClass1> V_16) ldloca V_16 initobj valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.3906<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class 
BaseClass1,class BaseClass1>::ClassMethod992() calli default string(object) ldstr "MyStruct501::ClassMethod992.3910()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod993<object>() calli default string(object) ldstr "MyStruct501::ClassMethod993.3911<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ClassMethod994<object>() calli default string(object) ldstr "MyStruct501::ClassMethod994.3912<System.Object>()" ldstr "valuetype MyStruct501`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldnull ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance bool valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string valuetype MyStruct501`2<class BaseClass1,class BaseClass1>::ToString() calli default string(object) pop ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class 
BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3907<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldloc V_16 box valuetype MyStruct501`2<class BaseClass1,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct501::Method7.MI.3909<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct501`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated451::MethodCallingTest() call void Generated451::ConstrainedCallsTest() call void Generated451::StructConstrainedInterfaceCallsTest() call void Generated451::CalliTest() ldc.i4 100 ret } }
-1
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/coreclr/ildasm/ildasmpch.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if !defined(_ILDASMPCH_H) #define _ILDASMPCH_H #define OEMRESOURCE #define INITGUID #include <windows.h> #include <cor.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <winuser.h> #include <commctrl.h> #include <commdlg.h> #include <richedit.h> #include <shellapi.h> #include <htmlhelp.h> #include <conio.h> #ifndef Debug_ReportError #define Debug_ReportError(strMessage) #endif #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if !defined(_ILDASMPCH_H) #define _ILDASMPCH_H #define OEMRESOURCE #define INITGUID #include <windows.h> #include <cor.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <winuser.h> #include <commctrl.h> #include <commdlg.h> #include <richedit.h> #include <shellapi.h> #include <htmlhelp.h> #include <conio.h> #ifndef Debug_ReportError #define Debug_ReportError(strMessage) #endif #endif
-1
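The PR description above concerns the JIT's register allocator, while this record's own file (ildasmpch.h, label -1) is unrelated to the change. As a hedged illustration only — the enum, struct, and function names below are simplified stand-ins invented for this sketch, not the actual types in src/coreclr/jit — the block-level test the description outlines amounts to treating a block that ends in BBJ_THROW as one whose upper vector state never needs preserving:

```cpp
#include <cassert>

// Simplified stand-ins for the JIT's flow-graph types; the real BasicBlock in
// src/coreclr/jit carries far more state than this sketch assumes.
enum BBjumpKinds
{
    BBJ_RETURN, // block returns normally
    BBJ_ALWAYS, // block jumps unconditionally to a successor
    BBJ_THROW   // block terminates by raising an exception
};

struct BasicBlock
{
    BBjumpKinds bbJumpKind;
};

// Core idea from the PR: control never resumes after a BBJ_THROW block, so
// saving/restoring the upper halves of large SIMD registers around the calls
// inside it is wasted work and can be skipped.
static bool shouldSkipUpperVectorSaveRestore(const BasicBlock& block)
{
    return block.bbJumpKind == BBJ_THROW;
}

int main()
{
    BasicBlock throwBlock{BBJ_THROW};
    BasicBlock returnBlock{BBJ_RETURN};

    assert(shouldSkipUpperVectorSaveRestore(throwBlock));   // skip for throws
    assert(!shouldSkipUpperVectorSaveRestore(returnBlock)); // preserve otherwise
    return 0;
}
```

The real change also builds on the earlier non-returning-call logic from https://github.com/dotnet/runtime/pull/62662; this sketch captures only the block-level check.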
dotnet/runtime
66,109
Skip Upper vector save/restore for blocks that THROW
Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
kunalspathak
2022-03-02T21:17:13Z
2022-03-04T07:00:20Z
a0635832f1c4cf02423b4cb40cb8611a4163a78d
d832befeb0edf5e7b5685beb5b7e3d7932a1fa28
Skip Upper vector save/restore for blocks that THROW. Currently, we save and restore vectors for `BBJ_THROW` blocks as well, which is not really needed. In https://github.com/dotnet/runtime/pull/62662, we avoided doing save/restore for calls that do not return. This PR extends the logic to also skip save/restore if the block ends with a throw. This also reverts https://github.com/dotnet/runtime/pull/66062. Fixes: https://github.com/dotnet/runtime/issues/65332
./src/libraries/System.Runtime.Serialization.Formatters/src/System/Runtime/Serialization/Formatters/Binary/BinaryFormatterWriter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Globalization; using System.IO; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; namespace System.Runtime.Serialization.Formatters.Binary { internal sealed class BinaryFormatterWriter { private const int ChunkSize = 4096; private readonly Stream _outputStream; private readonly FormatterTypeStyle _formatterTypeStyle; private readonly ObjectWriter _objectWriter; private readonly BinaryWriter _dataWriter; private int _consecutiveNullArrayEntryCount; private Dictionary<string, ObjectMapInfo>? _objectMapTable; private BinaryObject? _binaryObject; private BinaryObjectWithMap? _binaryObjectWithMap; private BinaryObjectWithMapTyped? _binaryObjectWithMapTyped; private BinaryObjectString? _binaryObjectString; private BinaryArray? _binaryArray; private byte[]? _byteBuffer; private MemberPrimitiveUnTyped? _memberPrimitiveUnTyped; private MemberPrimitiveTyped? _memberPrimitiveTyped; private ObjectNull? _objectNull; private MemberReference? _memberReference; private BinaryAssembly? _binaryAssembly; internal BinaryFormatterWriter(Stream outputStream, ObjectWriter objectWriter, FormatterTypeStyle formatterTypeStyle) { _outputStream = outputStream; _formatterTypeStyle = formatterTypeStyle; _objectWriter = objectWriter; _dataWriter = new BinaryWriter(outputStream, Encoding.UTF8); } internal void WriteBegin() { } internal void WriteEnd() { _dataWriter.Flush(); } internal void WriteBoolean(bool value) => _dataWriter.Write(value); internal void WriteByte(byte value) => _dataWriter.Write(value); private void WriteBytes(byte[] value) => _dataWriter.Write(value); private void WriteBytes(byte[] byteA, int offset, int size) => _dataWriter.Write(byteA, offset, size); internal void WriteChar(char value) => _dataWriter.Write(value); internal void WriteChars(char[] value) => _dataWriter.Write(value); internal void WriteDecimal(decimal value) => WriteString(value.ToString(CultureInfo.InvariantCulture)); internal void WriteSingle(float value) => _dataWriter.Write(value); internal void WriteDouble(double value) => _dataWriter.Write(value); internal void WriteInt16(short value) => _dataWriter.Write(value); internal void WriteInt32(int value) => _dataWriter.Write(value); internal void WriteInt64(long value) => _dataWriter.Write(value); internal void WriteSByte(sbyte value) => WriteByte(unchecked((byte)value)); internal void WriteString(string value) => _dataWriter.Write(value); internal void WriteTimeSpan(TimeSpan value) => WriteInt64(value.Ticks); internal void WriteDateTime(DateTime value) { // In .NET Framework, BinaryFormatter is able to access DateTime's ToBinaryRaw, // which just returns the value of its sole Int64 dateData field. Here, we don't // have access to that member (which doesn't even exist anymore, since it was only for // BinaryFormatter, which is now in a separate assembly). To address that, // we access the sole field directly via an unsafe cast. 
long dateData = Unsafe.As<DateTime, long>(ref value); WriteInt64(dateData); } internal void WriteUInt16(ushort value) => _dataWriter.Write(value); internal void WriteUInt32(uint value) => _dataWriter.Write(value); internal void WriteUInt64(ulong value) => _dataWriter.Write(value); internal void WriteObjectEnd(NameInfo memberNameInfo, NameInfo typeNameInfo) { } internal void WriteSerializationHeaderEnd() { var record = new MessageEnd(); record.Write(this); } internal void WriteSerializationHeader(int topId, int headerId, int minorVersion, int majorVersion) { var record = new SerializationHeaderRecord(BinaryHeaderEnum.SerializedStreamHeader, topId, headerId, minorVersion, majorVersion); record.Write(this); } internal void WriteObject(NameInfo nameInfo, NameInfo? typeNameInfo, int numMembers, string[] memberNames, Type[] memberTypes, WriteObjectInfo[] memberObjectInfos) { InternalWriteItemNull(); int assemId; int objectId = (int)nameInfo._objectId; Debug.Assert(typeNameInfo != null); // Explicitly called with null. Potential bug, but closed as Won't Fix: https://github.com/dotnet/runtime/issues/31402 string? objectName = objectId < 0 ? typeNameInfo.NIname : // Nested Object nameInfo.NIname; // Non-Nested if (_objectMapTable == null) { _objectMapTable = new Dictionary<string, ObjectMapInfo>(); } Debug.Assert(objectName != null); if (_objectMapTable.TryGetValue(objectName, out ObjectMapInfo? objectMapInfo) && objectMapInfo.IsCompatible(numMembers, memberNames, memberTypes)) { // Object if (_binaryObject == null) { _binaryObject = new BinaryObject(); } _binaryObject.Set(objectId, objectMapInfo._objectId); _binaryObject.Write(this); } else if (!typeNameInfo._transmitTypeOnObject) { // ObjectWithMap if (_binaryObjectWithMap == null) { _binaryObjectWithMap = new BinaryObjectWithMap(); } // BCL types are not placed into table assemId = (int)typeNameInfo._assemId; _binaryObjectWithMap.Set(objectId, objectName, numMembers, memberNames, assemId); _binaryObjectWithMap.Write(this); if (objectMapInfo == null) { _objectMapTable.Add(objectName, new ObjectMapInfo(objectId, numMembers, memberNames, memberTypes)); } } else { // ObjectWithMapTyped var binaryTypeEnumA = new BinaryTypeEnum[numMembers]; var typeInformationA = new object?[numMembers]; var assemIdA = new int[numMembers]; for (int i = 0; i < numMembers; i++) { object? typeInformation; binaryTypeEnumA[i] = BinaryTypeConverter.GetBinaryTypeInfo(memberTypes[i], memberObjectInfos[i], null, _objectWriter, out typeInformation, out assemId); typeInformationA[i] = typeInformation; assemIdA[i] = assemId; } if (_binaryObjectWithMapTyped == null) { _binaryObjectWithMapTyped = new BinaryObjectWithMapTyped(); } // BCL types are not placed in table assemId = (int)typeNameInfo._assemId; _binaryObjectWithMapTyped.Set(objectId, objectName, numMembers, memberNames, binaryTypeEnumA, typeInformationA, assemIdA, assemId); _binaryObjectWithMapTyped.Write(this); if (objectMapInfo == null) { _objectMapTable.Add(objectName, new ObjectMapInfo(objectId, numMembers, memberNames, memberTypes)); } } } internal void WriteObjectString(int objectId, string? value) { InternalWriteItemNull(); if (_binaryObjectString == null) { _binaryObjectString = new BinaryObjectString(); } _binaryObjectString.Set(objectId, value); _binaryObjectString.Write(this); } internal void WriteSingleArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? 
objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound, Array array) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum; var lengthA = new int[1]; lengthA[0] = length; int[]? lowerBoundA = null; object? typeInformation; if (lowerBound == 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.Single; } else { binaryArrayTypeEnum = BinaryArrayTypeEnum.SingleOffset; lowerBoundA = new int[1]; lowerBoundA[0] = lowerBound; } int assemId; BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo( arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } _binaryArray.Set((int)arrayNameInfo._objectId, 1, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); if (Converter.IsWriteAsByteArray(arrayElemTypeNameInfo._primitiveTypeEnum) && (lowerBound == 0)) { //array is written out as an array of bytes if (arrayElemTypeNameInfo._primitiveTypeEnum == InternalPrimitiveTypeE.Byte) { WriteBytes((byte[])array); } else if (arrayElemTypeNameInfo._primitiveTypeEnum == InternalPrimitiveTypeE.Char) { WriteChars((char[])array); } else { WriteArrayAsBytes(array, Converter.TypeLength(arrayElemTypeNameInfo._primitiveTypeEnum)); } } } private void WriteArrayAsBytes(Array array, int typeLength) { InternalWriteItemNull(); int arrayOffset = 0; if (_byteBuffer == null) { _byteBuffer = new byte[ChunkSize]; } while (arrayOffset < array.Length) { int numArrayItems = Math.Min(ChunkSize / typeLength, array.Length - arrayOffset); int bufferUsed = numArrayItems * typeLength; Buffer.BlockCopy(array, arrayOffset * typeLength, _byteBuffer, 0, bufferUsed); if (!BitConverter.IsLittleEndian) { // we know that we are writing a primitive type, so just do a simple swap for (int i = 0; i < bufferUsed; i += typeLength) { for (int j = 0; j < typeLength / 2; j++) { byte tmp = _byteBuffer[i + j]; _byteBuffer[i + j] = _byteBuffer[i + typeLength - 1 - j]; _byteBuffer[i + typeLength - 1 - j] = tmp; } } } WriteBytes(_byteBuffer, 0, bufferUsed); arrayOffset += numArrayItems; } } internal void WriteJaggedArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum; var lengthA = new int[1]; lengthA[0] = length; int[]? lowerBoundA = null; object? typeInformation; int assemId; if (lowerBound == 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.Jagged; } else { binaryArrayTypeEnum = BinaryArrayTypeEnum.JaggedOffset; lowerBoundA = new int[1]; lowerBoundA[0] = lowerBound; } BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo(arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } _binaryArray.Set((int)arrayNameInfo._objectId, 1, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); } internal void WriteRectangleArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int rank, int[] lengthA, int[] lowerBoundA) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum = BinaryArrayTypeEnum.Rectangular; object? 
typeInformation; int assemId; BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo(arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } for (int i = 0; i < rank; i++) { if (lowerBoundA[i] != 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.RectangularOffset; break; } } _binaryArray.Set((int)arrayNameInfo._objectId, rank, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); } internal void WriteObjectByteArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound, byte[] byteA) { InternalWriteItemNull(); WriteSingleArray(memberNameInfo, arrayNameInfo, objectInfo, arrayElemTypeNameInfo, length, lowerBound, byteA); } internal void WriteMember(NameInfo memberNameInfo, NameInfo typeNameInfo, object value) { InternalWriteItemNull(); InternalPrimitiveTypeE typeInformation = typeNameInfo._primitiveTypeEnum; // Writes Members with primitive values if (memberNameInfo._transmitTypeOnMember) { if (_memberPrimitiveTyped == null) { _memberPrimitiveTyped = new MemberPrimitiveTyped(); } _memberPrimitiveTyped.Set(typeInformation, value); _memberPrimitiveTyped.Write(this); } else { if (_memberPrimitiveUnTyped == null) { _memberPrimitiveUnTyped = new MemberPrimitiveUnTyped(); } _memberPrimitiveUnTyped.Set(typeInformation, value); _memberPrimitiveUnTyped.Write(this); } } internal void WriteNullMember(NameInfo memberNameInfo, NameInfo typeNameInfo) { InternalWriteItemNull(); if (_objectNull == null) { _objectNull = new ObjectNull(); } if (!memberNameInfo._isArrayItem) { _objectNull.SetNullCount(1); _objectNull.Write(this); _consecutiveNullArrayEntryCount = 0; } } internal void WriteMemberObjectRef(NameInfo memberNameInfo, int idRef) { InternalWriteItemNull(); if (_memberReference == null) { _memberReference = new MemberReference(); } _memberReference.Set(idRef); _memberReference.Write(this); } internal void WriteMemberNested(NameInfo memberNameInfo) { InternalWriteItemNull(); } internal void WriteMemberString(NameInfo memberNameInfo, NameInfo typeNameInfo, string? value) { InternalWriteItemNull(); WriteObjectString((int)typeNameInfo._objectId, value); } internal void WriteItem(NameInfo itemNameInfo, NameInfo typeNameInfo, object value) { InternalWriteItemNull(); WriteMember(itemNameInfo, typeNameInfo, value); } internal void WriteNullItem(NameInfo itemNameInfo, NameInfo typeNameInfo) { _consecutiveNullArrayEntryCount++; InternalWriteItemNull(); } internal void WriteDelayedNullItem() { _consecutiveNullArrayEntryCount++; } internal void WriteItemEnd() => InternalWriteItemNull(); private void InternalWriteItemNull() { if (_consecutiveNullArrayEntryCount > 0) { if (_objectNull == null) { _objectNull = new ObjectNull(); } _objectNull.SetNullCount(_consecutiveNullArrayEntryCount); _objectNull.Write(this); _consecutiveNullArrayEntryCount = 0; } } internal void WriteItemObjectRef(NameInfo nameInfo, int idRef) { InternalWriteItemNull(); WriteMemberObjectRef(nameInfo, idRef); } internal void WriteAssembly(Type? type, string assemblyString, int assemId, bool isNew) { //If the file being tested wasn't built as an assembly, then we're going to get null back //for the assembly name. This is very unfortunate. 
InternalWriteItemNull(); if (assemblyString == null) { assemblyString = string.Empty; } if (isNew) { if (_binaryAssembly == null) { _binaryAssembly = new BinaryAssembly(); } _binaryAssembly.Set(assemId, assemblyString); _binaryAssembly.Write(this); } } // Method to write a value onto a stream given its primitive type code internal void WriteValue(InternalPrimitiveTypeE code, object? value) { switch (code) { case InternalPrimitiveTypeE.Boolean: WriteBoolean(Convert.ToBoolean(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Byte: WriteByte(Convert.ToByte(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Char: WriteChar(Convert.ToChar(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Double: WriteDouble(Convert.ToDouble(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int16: WriteInt16(Convert.ToInt16(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int32: WriteInt32(Convert.ToInt32(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int64: WriteInt64(Convert.ToInt64(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.SByte: WriteSByte(Convert.ToSByte(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Single: WriteSingle(Convert.ToSingle(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt16: WriteUInt16(Convert.ToUInt16(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt32: WriteUInt32(Convert.ToUInt32(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt64: WriteUInt64(Convert.ToUInt64(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Decimal: WriteDecimal(Convert.ToDecimal(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.TimeSpan: WriteTimeSpan((TimeSpan)value!); break; case InternalPrimitiveTypeE.DateTime: WriteDateTime((DateTime)value!); break; default: throw new SerializationException(SR.Format(SR.Serialization_TypeCode, code.ToString())); } } private sealed class ObjectMapInfo { internal readonly int _objectId; private readonly int _numMembers; private readonly string[] _memberNames; private readonly Type[] _memberTypes; internal ObjectMapInfo(int objectId, int numMembers, string[] memberNames, Type[] memberTypes) { _objectId = objectId; _numMembers = numMembers; _memberNames = memberNames; _memberTypes = memberTypes; } internal bool IsCompatible(int numMembers, string[] memberNames, Type[]? memberTypes) { if (_numMembers != numMembers) { return false; } for (int i = 0; i < numMembers; i++) { if (!(_memberNames[i].Equals(memberNames[i]))) { return false; } if ((memberTypes != null) && (_memberTypes[i] != memberTypes[i])) { return false; } } return true; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Globalization; using System.IO; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; namespace System.Runtime.Serialization.Formatters.Binary { internal sealed class BinaryFormatterWriter { private const int ChunkSize = 4096; private readonly Stream _outputStream; private readonly FormatterTypeStyle _formatterTypeStyle; private readonly ObjectWriter _objectWriter; private readonly BinaryWriter _dataWriter; private int _consecutiveNullArrayEntryCount; private Dictionary<string, ObjectMapInfo>? _objectMapTable; private BinaryObject? _binaryObject; private BinaryObjectWithMap? _binaryObjectWithMap; private BinaryObjectWithMapTyped? _binaryObjectWithMapTyped; private BinaryObjectString? _binaryObjectString; private BinaryArray? _binaryArray; private byte[]? _byteBuffer; private MemberPrimitiveUnTyped? _memberPrimitiveUnTyped; private MemberPrimitiveTyped? _memberPrimitiveTyped; private ObjectNull? _objectNull; private MemberReference? _memberReference; private BinaryAssembly? _binaryAssembly; internal BinaryFormatterWriter(Stream outputStream, ObjectWriter objectWriter, FormatterTypeStyle formatterTypeStyle) { _outputStream = outputStream; _formatterTypeStyle = formatterTypeStyle; _objectWriter = objectWriter; _dataWriter = new BinaryWriter(outputStream, Encoding.UTF8); } internal void WriteBegin() { } internal void WriteEnd() { _dataWriter.Flush(); } internal void WriteBoolean(bool value) => _dataWriter.Write(value); internal void WriteByte(byte value) => _dataWriter.Write(value); private void WriteBytes(byte[] value) => _dataWriter.Write(value); private void WriteBytes(byte[] byteA, int offset, int size) => _dataWriter.Write(byteA, offset, size); internal void WriteChar(char value) => _dataWriter.Write(value); internal void WriteChars(char[] value) => _dataWriter.Write(value); internal void WriteDecimal(decimal value) => WriteString(value.ToString(CultureInfo.InvariantCulture)); internal void WriteSingle(float value) => _dataWriter.Write(value); internal void WriteDouble(double value) => _dataWriter.Write(value); internal void WriteInt16(short value) => _dataWriter.Write(value); internal void WriteInt32(int value) => _dataWriter.Write(value); internal void WriteInt64(long value) => _dataWriter.Write(value); internal void WriteSByte(sbyte value) => WriteByte(unchecked((byte)value)); internal void WriteString(string value) => _dataWriter.Write(value); internal void WriteTimeSpan(TimeSpan value) => WriteInt64(value.Ticks); internal void WriteDateTime(DateTime value) { // In .NET Framework, BinaryFormatter is able to access DateTime's ToBinaryRaw, // which just returns the value of its sole Int64 dateData field. Here, we don't // have access to that member (which doesn't even exist anymore, since it was only for // BinaryFormatter, which is now in a separate assembly). To address that, // we access the sole field directly via an unsafe cast. 
long dateData = Unsafe.As<DateTime, long>(ref value); WriteInt64(dateData); } internal void WriteUInt16(ushort value) => _dataWriter.Write(value); internal void WriteUInt32(uint value) => _dataWriter.Write(value); internal void WriteUInt64(ulong value) => _dataWriter.Write(value); internal void WriteObjectEnd(NameInfo memberNameInfo, NameInfo typeNameInfo) { } internal void WriteSerializationHeaderEnd() { var record = new MessageEnd(); record.Write(this); } internal void WriteSerializationHeader(int topId, int headerId, int minorVersion, int majorVersion) { var record = new SerializationHeaderRecord(BinaryHeaderEnum.SerializedStreamHeader, topId, headerId, minorVersion, majorVersion); record.Write(this); } internal void WriteObject(NameInfo nameInfo, NameInfo? typeNameInfo, int numMembers, string[] memberNames, Type[] memberTypes, WriteObjectInfo[] memberObjectInfos) { InternalWriteItemNull(); int assemId; int objectId = (int)nameInfo._objectId; Debug.Assert(typeNameInfo != null); // Explicitly called with null. Potential bug, but closed as Won't Fix: https://github.com/dotnet/runtime/issues/31402 string? objectName = objectId < 0 ? typeNameInfo.NIname : // Nested Object nameInfo.NIname; // Non-Nested if (_objectMapTable == null) { _objectMapTable = new Dictionary<string, ObjectMapInfo>(); } Debug.Assert(objectName != null); if (_objectMapTable.TryGetValue(objectName, out ObjectMapInfo? objectMapInfo) && objectMapInfo.IsCompatible(numMembers, memberNames, memberTypes)) { // Object if (_binaryObject == null) { _binaryObject = new BinaryObject(); } _binaryObject.Set(objectId, objectMapInfo._objectId); _binaryObject.Write(this); } else if (!typeNameInfo._transmitTypeOnObject) { // ObjectWithMap if (_binaryObjectWithMap == null) { _binaryObjectWithMap = new BinaryObjectWithMap(); } // BCL types are not placed into table assemId = (int)typeNameInfo._assemId; _binaryObjectWithMap.Set(objectId, objectName, numMembers, memberNames, assemId); _binaryObjectWithMap.Write(this); if (objectMapInfo == null) { _objectMapTable.Add(objectName, new ObjectMapInfo(objectId, numMembers, memberNames, memberTypes)); } } else { // ObjectWithMapTyped var binaryTypeEnumA = new BinaryTypeEnum[numMembers]; var typeInformationA = new object?[numMembers]; var assemIdA = new int[numMembers]; for (int i = 0; i < numMembers; i++) { object? typeInformation; binaryTypeEnumA[i] = BinaryTypeConverter.GetBinaryTypeInfo(memberTypes[i], memberObjectInfos[i], null, _objectWriter, out typeInformation, out assemId); typeInformationA[i] = typeInformation; assemIdA[i] = assemId; } if (_binaryObjectWithMapTyped == null) { _binaryObjectWithMapTyped = new BinaryObjectWithMapTyped(); } // BCL types are not placed in table assemId = (int)typeNameInfo._assemId; _binaryObjectWithMapTyped.Set(objectId, objectName, numMembers, memberNames, binaryTypeEnumA, typeInformationA, assemIdA, assemId); _binaryObjectWithMapTyped.Write(this); if (objectMapInfo == null) { _objectMapTable.Add(objectName, new ObjectMapInfo(objectId, numMembers, memberNames, memberTypes)); } } } internal void WriteObjectString(int objectId, string? value) { InternalWriteItemNull(); if (_binaryObjectString == null) { _binaryObjectString = new BinaryObjectString(); } _binaryObjectString.Set(objectId, value); _binaryObjectString.Write(this); } internal void WriteSingleArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? 
objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound, Array array) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum; var lengthA = new int[1]; lengthA[0] = length; int[]? lowerBoundA = null; object? typeInformation; if (lowerBound == 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.Single; } else { binaryArrayTypeEnum = BinaryArrayTypeEnum.SingleOffset; lowerBoundA = new int[1]; lowerBoundA[0] = lowerBound; } int assemId; BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo( arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } _binaryArray.Set((int)arrayNameInfo._objectId, 1, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); if (Converter.IsWriteAsByteArray(arrayElemTypeNameInfo._primitiveTypeEnum) && (lowerBound == 0)) { //array is written out as an array of bytes if (arrayElemTypeNameInfo._primitiveTypeEnum == InternalPrimitiveTypeE.Byte) { WriteBytes((byte[])array); } else if (arrayElemTypeNameInfo._primitiveTypeEnum == InternalPrimitiveTypeE.Char) { WriteChars((char[])array); } else { WriteArrayAsBytes(array, Converter.TypeLength(arrayElemTypeNameInfo._primitiveTypeEnum)); } } } private void WriteArrayAsBytes(Array array, int typeLength) { InternalWriteItemNull(); int arrayOffset = 0; if (_byteBuffer == null) { _byteBuffer = new byte[ChunkSize]; } while (arrayOffset < array.Length) { int numArrayItems = Math.Min(ChunkSize / typeLength, array.Length - arrayOffset); int bufferUsed = numArrayItems * typeLength; Buffer.BlockCopy(array, arrayOffset * typeLength, _byteBuffer, 0, bufferUsed); if (!BitConverter.IsLittleEndian) { // we know that we are writing a primitive type, so just do a simple swap for (int i = 0; i < bufferUsed; i += typeLength) { for (int j = 0; j < typeLength / 2; j++) { byte tmp = _byteBuffer[i + j]; _byteBuffer[i + j] = _byteBuffer[i + typeLength - 1 - j]; _byteBuffer[i + typeLength - 1 - j] = tmp; } } } WriteBytes(_byteBuffer, 0, bufferUsed); arrayOffset += numArrayItems; } } internal void WriteJaggedArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum; var lengthA = new int[1]; lengthA[0] = length; int[]? lowerBoundA = null; object? typeInformation; int assemId; if (lowerBound == 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.Jagged; } else { binaryArrayTypeEnum = BinaryArrayTypeEnum.JaggedOffset; lowerBoundA = new int[1]; lowerBoundA[0] = lowerBound; } BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo(arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } _binaryArray.Set((int)arrayNameInfo._objectId, 1, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); } internal void WriteRectangleArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int rank, int[] lengthA, int[] lowerBoundA) { InternalWriteItemNull(); BinaryArrayTypeEnum binaryArrayTypeEnum = BinaryArrayTypeEnum.Rectangular; object? 
typeInformation; int assemId; BinaryTypeEnum binaryTypeEnum = BinaryTypeConverter.GetBinaryTypeInfo(arrayElemTypeNameInfo._type!, objectInfo, arrayElemTypeNameInfo.NIname, _objectWriter, out typeInformation, out assemId); if (_binaryArray == null) { _binaryArray = new BinaryArray(); } for (int i = 0; i < rank; i++) { if (lowerBoundA[i] != 0) { binaryArrayTypeEnum = BinaryArrayTypeEnum.RectangularOffset; break; } } _binaryArray.Set((int)arrayNameInfo._objectId, rank, lengthA, lowerBoundA, binaryTypeEnum, typeInformation, binaryArrayTypeEnum, assemId); _binaryArray.Write(this); } internal void WriteObjectByteArray(NameInfo memberNameInfo, NameInfo arrayNameInfo, WriteObjectInfo? objectInfo, NameInfo arrayElemTypeNameInfo, int length, int lowerBound, byte[] byteA) { InternalWriteItemNull(); WriteSingleArray(memberNameInfo, arrayNameInfo, objectInfo, arrayElemTypeNameInfo, length, lowerBound, byteA); } internal void WriteMember(NameInfo memberNameInfo, NameInfo typeNameInfo, object value) { InternalWriteItemNull(); InternalPrimitiveTypeE typeInformation = typeNameInfo._primitiveTypeEnum; // Writes Members with primitive values if (memberNameInfo._transmitTypeOnMember) { if (_memberPrimitiveTyped == null) { _memberPrimitiveTyped = new MemberPrimitiveTyped(); } _memberPrimitiveTyped.Set(typeInformation, value); _memberPrimitiveTyped.Write(this); } else { if (_memberPrimitiveUnTyped == null) { _memberPrimitiveUnTyped = new MemberPrimitiveUnTyped(); } _memberPrimitiveUnTyped.Set(typeInformation, value); _memberPrimitiveUnTyped.Write(this); } } internal void WriteNullMember(NameInfo memberNameInfo, NameInfo typeNameInfo) { InternalWriteItemNull(); if (_objectNull == null) { _objectNull = new ObjectNull(); } if (!memberNameInfo._isArrayItem) { _objectNull.SetNullCount(1); _objectNull.Write(this); _consecutiveNullArrayEntryCount = 0; } } internal void WriteMemberObjectRef(NameInfo memberNameInfo, int idRef) { InternalWriteItemNull(); if (_memberReference == null) { _memberReference = new MemberReference(); } _memberReference.Set(idRef); _memberReference.Write(this); } internal void WriteMemberNested(NameInfo memberNameInfo) { InternalWriteItemNull(); } internal void WriteMemberString(NameInfo memberNameInfo, NameInfo typeNameInfo, string? value) { InternalWriteItemNull(); WriteObjectString((int)typeNameInfo._objectId, value); } internal void WriteItem(NameInfo itemNameInfo, NameInfo typeNameInfo, object value) { InternalWriteItemNull(); WriteMember(itemNameInfo, typeNameInfo, value); } internal void WriteNullItem(NameInfo itemNameInfo, NameInfo typeNameInfo) { _consecutiveNullArrayEntryCount++; InternalWriteItemNull(); } internal void WriteDelayedNullItem() { _consecutiveNullArrayEntryCount++; } internal void WriteItemEnd() => InternalWriteItemNull(); private void InternalWriteItemNull() { if (_consecutiveNullArrayEntryCount > 0) { if (_objectNull == null) { _objectNull = new ObjectNull(); } _objectNull.SetNullCount(_consecutiveNullArrayEntryCount); _objectNull.Write(this); _consecutiveNullArrayEntryCount = 0; } } internal void WriteItemObjectRef(NameInfo nameInfo, int idRef) { InternalWriteItemNull(); WriteMemberObjectRef(nameInfo, idRef); } internal void WriteAssembly(Type? type, string assemblyString, int assemId, bool isNew) { //If the file being tested wasn't built as an assembly, then we're going to get null back //for the assembly name. This is very unfortunate. 
InternalWriteItemNull(); if (assemblyString == null) { assemblyString = string.Empty; } if (isNew) { if (_binaryAssembly == null) { _binaryAssembly = new BinaryAssembly(); } _binaryAssembly.Set(assemId, assemblyString); _binaryAssembly.Write(this); } } // Method to write a value onto a stream given its primitive type code internal void WriteValue(InternalPrimitiveTypeE code, object? value) { switch (code) { case InternalPrimitiveTypeE.Boolean: WriteBoolean(Convert.ToBoolean(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Byte: WriteByte(Convert.ToByte(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Char: WriteChar(Convert.ToChar(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Double: WriteDouble(Convert.ToDouble(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int16: WriteInt16(Convert.ToInt16(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int32: WriteInt32(Convert.ToInt32(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Int64: WriteInt64(Convert.ToInt64(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.SByte: WriteSByte(Convert.ToSByte(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Single: WriteSingle(Convert.ToSingle(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt16: WriteUInt16(Convert.ToUInt16(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt32: WriteUInt32(Convert.ToUInt32(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.UInt64: WriteUInt64(Convert.ToUInt64(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.Decimal: WriteDecimal(Convert.ToDecimal(value, CultureInfo.InvariantCulture)); break; case InternalPrimitiveTypeE.TimeSpan: WriteTimeSpan((TimeSpan)value!); break; case InternalPrimitiveTypeE.DateTime: WriteDateTime((DateTime)value!); break; default: throw new SerializationException(SR.Format(SR.Serialization_TypeCode, code.ToString())); } } private sealed class ObjectMapInfo { internal readonly int _objectId; private readonly int _numMembers; private readonly string[] _memberNames; private readonly Type[] _memberTypes; internal ObjectMapInfo(int objectId, int numMembers, string[] memberNames, Type[] memberTypes) { _objectId = objectId; _numMembers = numMembers; _memberNames = memberNames; _memberTypes = memberTypes; } internal bool IsCompatible(int numMembers, string[] memberNames, Type[]? memberTypes) { if (_numMembers != numMembers) { return false; } for (int i = 0; i < numMembers; i++) { if (!(_memberNames[i].Equals(memberNames[i]))) { return false; } if ((memberTypes != null) && (_memberTypes[i] != memberTypes[i])) { return false; } } return true; } } } }
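The Unsafe.As reinterpretation that the WriteDateTime comment above describes can be seen in isolation in the sketch below. This is hypothetical demo code, not part of the serializer, and it relies on DateTime's well-known (but undocumented) internal layout: a single 64-bit field whose low 62 bits are the tick count and whose top 2 bits encode the DateTimeKind flags.

using System;
using System.Runtime.CompilerServices;

internal static class DateTimeRawSketch
{
    internal static void Main()
    {
        DateTime value = DateTime.UtcNow;
        // The same reinterpretation WriteDateTime performs to reach the private dateData field.
        long dateData = Unsafe.As<DateTime, long>(ref value);
        Console.WriteLine($"raw dateData: 0x{dateData:X16}");
        // Mask off the top two kind-flag bits to recover the tick count.
        Console.WriteLine($"ticks: {dateData & 0x3FFF_FFFF_FFFF_FFFF}");
        Console.WriteLine($"DateTime.Ticks: {value.Ticks}"); // should match the masked value
    }
}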
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
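For readers unfamiliar with the feature, the sketch below shows the kind of call site this PR teaches the compiler to resolve: a constrained call to a static virtual (static abstract) interface member. It compiles with C# 11 / .NET 7 or later; the IHasZero interface and Meters struct are invented for illustration and are not part of this change.

using System;

public interface IHasZero<TSelf> where TSelf : IHasZero<TSelf>
{
    // A static virtual interface member (declared with 'static abstract').
    static abstract TSelf Zero { get; }
}

public readonly struct Meters : IHasZero<Meters>
{
    public double Value { get; init; }
    public static Meters Zero => new Meters { Value = 0 };
}

public static class StaticVirtualDemo
{
    // T.Zero below compiles to a constrained call on a static virtual method; resolving
    // such call sites is what ResolveConstraintMethodApprox is extended to handle.
    public static T ZeroOf<T>() where T : IHasZero<T> => T.Zero;

    public static void Main() => Console.WriteLine(ZeroOf<Meters>().Value); // prints 0
}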
./src/coreclr/tools/Common/Compiler/TypeExtensions.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.IL; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public static class TypeExtensions { public static bool IsSealed(this TypeDesc type) { var metadataType = type as MetadataType; if (metadataType != null) { return metadataType.IsSealed || metadataType.IsModuleType; } Debug.Assert(type.IsArray, "IsSealed on a type with no virtual methods?"); return true; } /// <summary> /// Gets the type that defines virtual method slots for the specified type. /// </summary> public static DefType GetClosestDefType(this TypeDesc type) { return ((CompilerTypeSystemContext)type.Context).GetClosestDefType(type); } /// <summary> /// Gets a value indicating whether the method requires a hidden instantiation argument in addition /// to the formal arguments defined in the method signature. /// </summary> public static bool RequiresInstArg(this MethodDesc method) { return method.IsSharedByGenericInstantiations && (method.HasInstantiation || method.Signature.IsStatic || method.ImplementationType.IsValueType || (method.ImplementationType.IsInterface && !method.IsAbstract)); } /// <summary> /// Gets a value indicating whether the method acquires the generic context from a hidden /// instantiation argument that points to the method's generic dictionary. /// </summary> public static bool RequiresInstMethodDescArg(this MethodDesc method) { return method.HasInstantiation && method.IsSharedByGenericInstantiations; } /// <summary> /// Gets a value indicating whether the method acquires the generic context from a hidden /// instantiation argument that points to the generic dictionary of the method's owning type. /// </summary> public static bool RequiresInstMethodTableArg(this MethodDesc method) { return (method.Signature.IsStatic || method.ImplementationType.IsValueType || (method.ImplementationType.IsInterface && !method.IsAbstract)) && method.IsSharedByGenericInstantiations && !method.HasInstantiation; } /// <summary> /// Gets a value indicating whether the method acquires the generic context from the this pointer. /// </summary> public static bool AcquiresInstMethodTableFromThis(this MethodDesc method) { return method.IsSharedByGenericInstantiations && !method.HasInstantiation && !method.Signature.IsStatic && !method.ImplementationType.IsValueType && !(method.ImplementationType.IsInterface && !method.IsAbstract); } /// <summary> /// Returns true if '<paramref name="method"/>' is the "Address" method on multidimensional array types. /// </summary> public static bool IsArrayAddressMethod(this MethodDesc method) { var arrayMethod = method as ArrayMethod; return arrayMethod != null && arrayMethod.Kind == ArrayMethodKind.Address; } /// <summary> /// Returns true if '<paramref name="method"/>' is one of the special methods on multidimensional array types (set, get, address). /// </summary> public static bool IsArrayMethod(this MethodDesc method) { var arrayMethod = method as ArrayMethod; return arrayMethod != null && (arrayMethod.Kind == ArrayMethodKind.Address || arrayMethod.Kind == ArrayMethodKind.Get || arrayMethod.Kind == ArrayMethodKind.Set || arrayMethod.Kind == ArrayMethodKind.Ctor); } /// <summary> /// Gets a value indicating whether this type has any generic virtual methods. 
/// </summary> public static bool HasGenericVirtualMethods(this TypeDesc type) { foreach (var method in type.GetAllMethods()) { if (method.IsVirtual && method.HasInstantiation) return true; } return false; } /// <summary> /// Wrapper helper function around the IsCanonicalDefinitionType API on the TypeSystemContext /// </summary> public static bool IsCanonicalDefinitionType(this TypeDesc type, CanonicalFormKind kind) { return type.Context.IsCanonicalDefinitionType(type, kind); } /// <summary> /// Gets the value of the field ordinal. Ordinals are computed by also including static fields, but excluding /// literal fields and fields with RVAs. /// </summary> public static int GetFieldOrdinal(this FieldDesc inputField) { // Make sure we are asking the question for a valid instance or static field Debug.Assert(!inputField.HasRva && !inputField.IsLiteral); int fieldOrdinal = 0; foreach (FieldDesc field in inputField.OwningType.GetFields()) { // If this field does not contribute to layout, skip if (field.HasRva || field.IsLiteral) continue; if (field == inputField) return fieldOrdinal; fieldOrdinal++; } Debug.Assert(false); return -1; } /// <summary> /// What is the maximum number of steps that need to be taken from this type to its most contained generic type. /// i.e. /// System.Int32 => 0 /// List&lt;System.Int32&gt; => 1 /// Dictionary&lt;System.Int32,System.Int32&gt; => 1 /// Dictionary&lt;List&lt;System.Int32&gt;,System.Int32&gt; => 2 /// </summary> public static int GetGenericDepth(this TypeDesc type) { if (type.HasInstantiation) { int maxGenericDepthInInstantiation = 0; foreach (TypeDesc instantiationType in type.Instantiation) { maxGenericDepthInInstantiation = Math.Max(instantiationType.GetGenericDepth(), maxGenericDepthInInstantiation); } return maxGenericDepthInInstantiation + 1; } return 0; } /// <summary> /// Determine if a type has a generic depth greater than a given value /// </summary> public static bool IsGenericDepthGreaterThan(this TypeDesc type, int depth) { if (depth < 0) return true; foreach (TypeDesc instantiationType in type.Instantiation) { if (instantiationType.IsGenericDepthGreaterThan(depth - 1)) return true; } return false; } /// <summary> /// What is the maximum number of steps that need to be taken from this method to its most contained generic type. /// i.e. /// SomeGenericType&lt;System.Int32&gt;.Method&lt;System.Int32&gt; => 1 /// SomeType.Method&lt;System.Int32&gt; => 0 /// SomeType.Method&lt;List&lt;System.Int32&gt;&gt; => 1 /// </summary> public static int GetGenericDepth(this MethodDesc method) { int genericDepth = method.OwningType.GetGenericDepth(); foreach (TypeDesc type in method.Instantiation) { genericDepth = Math.Max(genericDepth, type.GetGenericDepth()); } return genericDepth; } /// <summary> /// Determine if a method has a generic depth greater than a given value /// </summary> /// <param name="depth"></param> /// <returns></returns> public static bool IsGenericDepthGreaterThan(this MethodDesc method, int depth) { if (method.OwningType.IsGenericDepthGreaterThan(depth)) return true; foreach (TypeDesc type in method.Instantiation) { if (type.IsGenericDepthGreaterThan(depth)) return true; } return false; } /// <summary> /// Determines whether an array type does not implement the generic collection interfaces. This is the case /// for multi-dimensional arrays, and arrays of pointers.
/// </summary> public static bool IsArrayTypeWithoutGenericInterfaces(this TypeDesc type) { if (!type.IsArray) return false; var arrayType = (ArrayType)type; TypeDesc elementType = arrayType.ElementType; return type.IsMdArray || elementType.IsPointer || elementType.IsFunctionPointer; } public static TypeDesc MergeTypesToCommonParent(TypeDesc ta, TypeDesc tb) { if (ta == tb) { return ta; } // Handle the array case if (ta.IsArray) { if (tb.IsArray) { return MergeArrayTypesToCommonParent((ArrayType)ta, (ArrayType)tb); } else if (tb.IsInterface) { // Check to see if we can merge the array to a common interface (such as Derived[] and IList<Base>) if (ta.CanCastTo(tb)) { return tb; } } // keep merging from here ta = ta.Context.GetWellKnownType(WellKnownType.Array); } else if (tb.IsArray) { if (ta.IsInterface && tb.CanCastTo(ta)) { return ta; } tb = tb.Context.GetWellKnownType(WellKnownType.Array); } Debug.Assert(ta.IsDefType); Debug.Assert(tb.IsDefType); if (tb.IsInterface) { if (ta.IsInterface) { // // Both types are interfaces. Check whether one // interface extends the other. // // Does tb extend ta ? // if (tb.ImplementsEquivalentInterface(ta)) { return ta; } // // Does ta extend tb ? // if (ta.ImplementsEquivalentInterface(tb)) { return tb; } // No compatible merge found - using Object return ta.Context.GetWellKnownType(WellKnownType.Object); } else { return MergeClassWithInterface(ta, tb); } } else if (ta.IsInterface) { return MergeClassWithInterface(tb, ta); } int aDepth = 0; int bDepth = 0; // find the depth in the class hierarchy for each class for (TypeDesc searchType = ta; searchType != null; searchType = searchType.BaseType) { aDepth++; } for (TypeDesc searchType = tb; searchType != null; searchType = searchType.BaseType) { bDepth++; } // for whichever class is lower down in the hierarchy, walk up the superclass chain // to the same level as the other class while (aDepth > bDepth) { ta = ta.BaseType; aDepth--; } while (bDepth > aDepth) { tb = tb.BaseType; bDepth--; } while (ta != tb) { ta = ta.BaseType; tb = tb.BaseType; } // If no compatible merge is found, we end up using Object Debug.Assert(ta != null); return ta; } private static TypeDesc MergeArrayTypesToCommonParent(ArrayType ta, ArrayType tb) { Debug.Assert(ta.IsArray && tb.IsArray && ta != tb); // if the ranks don't match, the common ancestor is System.Array if (ta.IsSzArray != tb.IsSzArray || ta.Rank != tb.Rank) { return ta.Context.GetWellKnownType(WellKnownType.Array); } TypeDesc taElem = ta.ElementType; TypeDesc tbElem = tb.ElementType; Debug.Assert(taElem != tbElem); TypeDesc mergeElem; if (taElem.IsArray && tbElem.IsArray) { mergeElem = MergeArrayTypesToCommonParent((ArrayType)taElem, (ArrayType)tbElem); } else if (taElem.IsGCPointer && tbElem.IsGCPointer) { // Find the common ancestor of the element types. mergeElem = MergeTypesToCommonParent(taElem, tbElem); } else { // The element types have nothing in common.
return ta.Context.GetWellKnownType(WellKnownType.Array); } if (mergeElem == taElem) { return ta; } if (mergeElem == tbElem) { return tb; } if (taElem.IsMdArray) { return mergeElem.MakeArrayType(ta.Rank); } return mergeElem.MakeArrayType(); } private static bool ImplementsEquivalentInterface(this TypeDesc type, TypeDesc interfaceType) { foreach (DefType implementedInterface in type.RuntimeInterfaces) { if (implementedInterface == interfaceType) { return true; } } return false; } /// <summary> /// Attempts to resolve constrained call to <paramref name="interfaceMethod"/> into a concrete non-unboxing /// method on <paramref name="constrainedType"/>. /// The ability to resolve constraint methods is affected by the degree of code sharing we are performing /// for generic code. /// </summary> /// <returns>The resolved method or null if the constraint couldn't be resolved.</returns> public static MethodDesc TryResolveConstraintMethodApprox(this TypeDesc constrainedType, TypeDesc interfaceType, MethodDesc interfaceMethod, out bool forceRuntimeLookup) { forceRuntimeLookup = false; // We can't resolve constraint calls effectively for reference types, and there's // not a lot of perf. benefit in doing it anyway. if (!constrainedType.IsValueType) { return null; } // Interface method may or may not be fully canonicalized here. // It would be canonical on the CoreCLR side so canonicalize here to keep the algorithms similar. Instantiation methodInstantiation = interfaceMethod.Instantiation; interfaceMethod = interfaceMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // 1. Find the (possibly generic) method that would implement the // constraint if we were making a call on a boxed value type. TypeDesc canonType = constrainedType.ConvertToCanonForm(CanonicalFormKind.Specific); TypeSystemContext context = constrainedType.Context; MethodDesc genInterfaceMethod = interfaceMethod.GetMethodDefinition(); MethodDesc method = null; if (genInterfaceMethod.OwningType.IsInterface) { // Sometimes (when compiling shared generic code) // we don't have enough exact type information at JIT time // even to decide whether we will be able to resolve to an unboxed entry point... // To cope with this case we always go via the helper function if there's any // chance of this happening by checking for all interfaces which might possibly // be compatible with the call (verification will have ensured that // at least one of them will be) // Enumerate all potential interface instantiations int potentialMatchingInterfaces = 0; foreach (DefType potentialInterfaceType in canonType.RuntimeInterfaces) { if (potentialInterfaceType.ConvertToCanonForm(CanonicalFormKind.Specific) == interfaceType.ConvertToCanonForm(CanonicalFormKind.Specific)) { potentialMatchingInterfaces++; MethodDesc potentialInterfaceMethod = genInterfaceMethod; if (potentialInterfaceMethod.OwningType != potentialInterfaceType) { potentialInterfaceMethod = context.GetMethodForInstantiatedType( potentialInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)potentialInterfaceType); } method = canonType.ResolveInterfaceMethodToVirtualMethodOnType(potentialInterfaceMethod); // See code:#TryResolveConstraintMethodApprox_DoNotReturnParentMethod if (method != null && !method.OwningType.IsValueType) { // We explicitly wouldn't want to abort if we found a default implementation. // The above resolution doesn't consider the default methods. 
Debug.Assert(!method.OwningType.IsInterface); return null; } } } Debug.Assert(potentialMatchingInterfaces != 0); if (potentialMatchingInterfaces > 1) { // We have more potentially matching interfaces Debug.Assert(interfaceType.HasInstantiation); bool isExactMethodResolved = false; if (!interfaceType.IsCanonicalSubtype(CanonicalFormKind.Any) && !interfaceType.IsGenericDefinition && !constrainedType.IsCanonicalSubtype(CanonicalFormKind.Any) && !constrainedType.IsGenericDefinition) { // We have exact interface and type instantiations (no generic variables and __Canon used // anywhere) if (constrainedType.CanCastTo(interfaceType)) { // We can resolve to exact method MethodDesc exactInterfaceMethod = context.GetMethodForInstantiatedType( genInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceType); method = constrainedType.ResolveVariantInterfaceMethodToVirtualMethodOnType(exactInterfaceMethod); isExactMethodResolved = method != null; } } if (!isExactMethodResolved) { // We couldn't resolve the interface statically // Notify the caller that it should use runtime lookup // Note that we can leave pMD incorrect, because we will use runtime lookup forceRuntimeLookup = true; } } else { // If we can resolve the interface exactly then do so (e.g. when doing the exact // lookup at runtime, or when not sharing generic code). if (constrainedType.CanCastTo(interfaceType)) { MethodDesc exactInterfaceMethod = genInterfaceMethod; if (genInterfaceMethod.OwningType != interfaceType) exactInterfaceMethod = context.GetMethodForInstantiatedType( genInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceType); method = constrainedType.ResolveVariantInterfaceMethodToVirtualMethodOnType(exactInterfaceMethod); } } } else if (genInterfaceMethod.IsVirtual) { MethodDesc targetMethod = interfaceType.FindMethodOnTypeWithMatchingTypicalMethod(genInterfaceMethod); method = constrainedType.FindVirtualFunctionTargetMethodOnObjectType(targetMethod); } else { // The method will be null if calling a non-virtual instance // methods on System.Object, i.e. when these are used as a constraint. method = null; } if (method == null) { // Fall back to VSD return null; } //#TryResolveConstraintMethodApprox_DoNotReturnParentMethod // Only return a method if the value type itself declares the method, // otherwise we might get a method from Object or System.ValueType if (!method.OwningType.IsValueType) { // Fall back to VSD return null; } // We've resolved the method, ignoring its generic method arguments // If the method is a generic method then go and get the instantiated descriptor if (methodInstantiation.Length != 0) { method = method.MakeInstantiatedMethod(methodInstantiation); } Debug.Assert(method != null); return method; } private static TypeDesc MergeClassWithInterface(TypeDesc type, TypeDesc interfaceType) { // Check if the class implements the interface if (type.ImplementsEquivalentInterface(interfaceType)) { return interfaceType; } // Check if the class and the interface implement a common interface foreach (var potentialCommonInterface in interfaceType.RuntimeInterfaces) { if (type.ImplementsEquivalentInterface(potentialCommonInterface)) { // Found a common interface. If there are multiple common interfaces, then // the problem is ambiguous so we'll just take the first one--it's the best // we can do. 
return potentialCommonInterface; } } // No compatible merge found - using Object return type.Context.GetWellKnownType(WellKnownType.Object); } /// <summary> /// Normalizes canonical instantiations (converts Foo&lt;object, __Canon&gt; to /// Foo&lt;__Canon, __Canon>). Returns identity for non-canonical types. /// </summary> public static TypeDesc NormalizeInstantiation(this TypeDesc thisType) { if (thisType.IsCanonicalSubtype(CanonicalFormKind.Any)) return thisType.ConvertToCanonForm(CanonicalFormKind.Specific); return thisType; } public static Instantiation GetInstantiationThatMeetsConstraints(Instantiation inst, bool allowCanon) { TypeDesc[] resultArray = new TypeDesc[inst.Length]; for (int i = 0; i < inst.Length; i++) { TypeDesc instArg = GetTypeThatMeetsConstraints((GenericParameterDesc)inst[i], allowCanon); if (instArg == null) return default(Instantiation); resultArray[i] = instArg; } return new Instantiation(resultArray); } private static TypeDesc GetTypeThatMeetsConstraints(GenericParameterDesc genericParam, bool allowCanon) { TypeSystemContext context = genericParam.Context; // Universal canon is the best option if it's supported if (allowCanon && context.SupportsUniversalCanon) return context.UniversalCanonType; // Not nullable type is the only thing where we can't substitute reference types GenericConstraints constraints = genericParam.Constraints; if ((constraints & GenericConstraints.NotNullableValueTypeConstraint) != 0) return null; // If canon is allowed, we can use that if (allowCanon && context.SupportsCanon) { foreach (var c in genericParam.TypeConstraints) { // Could be e.g. "where T : U" // We could try to dig into the U and solve it, but that just opens us up to // recursion and it's just not worth it. if (c.IsSignatureVariable) return null; if (!c.IsGCPointer) return null; } return genericParam.Context.CanonType; } // If canon is not allowed, we're limited in our choices. TypeDesc constrainedType = null; foreach (var c in genericParam.TypeConstraints) { // Can't do multiple constraints if (constrainedType != null) return null; // Could be e.g. "where T : IFoo<U>" or "where T : U" if (c.ContainsSignatureVariables()) return null; constrainedType = c; } return constrainedType ?? genericParam.Context.GetWellKnownType(WellKnownType.Object); } public static bool ContainsSignatureVariables(this Instantiation instantiation, bool treatGenericParameterLikeSignatureVariable = false) { foreach (var arg in instantiation) { if (arg.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; } return false; } /// <summary> /// Return true when the type in question is marked with the NonVersionable attribute. /// </summary> /// <param name="type">Type to check</param> /// <returns>True when the type is marked with the non-versionable custom attribute, false otherwise.</returns> public static bool IsNonVersionable(this MetadataType type) { return type.HasCustomAttribute("System.Runtime.Versioning", "NonVersionableAttribute"); } /// <summary> /// Return true when the method is marked as non-versionable. Non-versionable methods /// may be freely inlined into ReadyToRun images even when they don't reside in the /// same version bubble as the module being compiled. 
/// </summary> /// <param name="method">Method to check</param> /// <returns>True when the method is marked as non-versionable, false otherwise.</returns> public static bool IsNonVersionable(this MethodDesc method) { return method.HasCustomAttribute("System.Runtime.Versioning", "NonVersionableAttribute"); } /// <summary> /// Returns true if <paramref name="method"/> is an actual native entrypoint. /// There's a distinction between when a method reports it's a PInvoke in the metadata /// versus how it's treated in the compiler. For many PInvoke methods the compiler will generate /// an IL body. The methods with an IL method body shouldn't be treated as PInvoke within the compiler. /// </summary> public static bool IsRawPInvoke(this MethodDesc method) { return method.IsPInvoke && (method is Internal.IL.Stubs.PInvokeTargetNativeMethod); } public static bool IsDynamicInterfaceCastableImplementation(this MetadataType interfaceType) { Debug.Assert(interfaceType.IsInterface); return interfaceType.HasCustomAttribute("System.Runtime.InteropServices", "DynamicInterfaceCastableImplementationAttribute"); } } }
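As a small runnable analogue of the GetGenericDepth recursion in the file above, the sketch below reimplements the same idea against System.Type via reflection. The names are invented for the example; the compiler's own version operates on TypeDesc instantiations instead.

using System;
using System.Collections.Generic;
using System.Linq;

internal static class GenericDepthSketch
{
    // Depth is 0 for non-generic types, else 1 + the deepest depth among the type arguments,
    // mirroring the loop in TypeExtensions.GetGenericDepth.
    internal static int GetGenericDepth(Type type) =>
        type.IsGenericType ? 1 + type.GetGenericArguments().Max(GetGenericDepth) : 0;

    internal static void Main()
    {
        Console.WriteLine(GetGenericDepth(typeof(int)));                        // 0
        Console.WriteLine(GetGenericDepth(typeof(List<int>)));                  // 1
        Console.WriteLine(GetGenericDepth(typeof(Dictionary<List<int>, int>))); // 2
    }
}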
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.IL; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public static class TypeExtensions { public static bool IsSealed(this TypeDesc type) { var metadataType = type as MetadataType; if (metadataType != null) { return metadataType.IsSealed || metadataType.IsModuleType; } Debug.Assert(type.IsArray, "IsSealed on a type with no virtual methods?"); return true; } /// <summary> /// Gets the type that defines virtual method slots for the specified type. /// </summary> public static DefType GetClosestDefType(this TypeDesc type) { return ((CompilerTypeSystemContext)type.Context).GetClosestDefType(type); } /// <summary> /// Gets a value indicating whether the method requires a hidden instantiation argument in addition /// to the formal arguments defined in the method signature. /// </summary> public static bool RequiresInstArg(this MethodDesc method) { return method.IsSharedByGenericInstantiations && (method.HasInstantiation || method.Signature.IsStatic || method.ImplementationType.IsValueType || (method.ImplementationType.IsInterface && !method.IsAbstract)); } /// <summary> /// Gets a value indicating whether the method acquires the generic context from a hidden /// instantiation argument that points to the method's generic dictionary. /// </summary> public static bool RequiresInstMethodDescArg(this MethodDesc method) { return method.HasInstantiation && method.IsSharedByGenericInstantiations; } /// <summary> /// Gets a value indicating whether the method acquires the generic context from a hidden /// instantiation argument that points to the generic dictionary of the method's owning type. /// </summary> public static bool RequiresInstMethodTableArg(this MethodDesc method) { return (method.Signature.IsStatic || method.ImplementationType.IsValueType || (method.ImplementationType.IsInterface && !method.IsAbstract)) && method.IsSharedByGenericInstantiations && !method.HasInstantiation; } /// <summary> /// Gets a value indicating whether the method acquires the generic context from the this pointer. /// </summary> public static bool AcquiresInstMethodTableFromThis(this MethodDesc method) { return method.IsSharedByGenericInstantiations && !method.HasInstantiation && !method.Signature.IsStatic && !method.ImplementationType.IsValueType && !(method.ImplementationType.IsInterface && !method.IsAbstract); } /// <summary> /// Returns true if '<paramref name="method"/>' is the "Address" method on multidimensional array types. /// </summary> public static bool IsArrayAddressMethod(this MethodDesc method) { var arrayMethod = method as ArrayMethod; return arrayMethod != null && arrayMethod.Kind == ArrayMethodKind.Address; } /// <summary> /// Returns true if '<paramref name="method"/>' is one of the special methods on multidimensional array types (set, get, address). /// </summary> public static bool IsArrayMethod(this MethodDesc method) { var arrayMethod = method as ArrayMethod; return arrayMethod != null && (arrayMethod.Kind == ArrayMethodKind.Address || arrayMethod.Kind == ArrayMethodKind.Get || arrayMethod.Kind == ArrayMethodKind.Set || arrayMethod.Kind == ArrayMethodKind.Ctor); } /// <summary> /// Gets a value indicating whether this type has any generic virtual methods. 
/// </summary> public static bool HasGenericVirtualMethods(this TypeDesc type) { foreach (var method in type.GetAllMethods()) { if (method.IsVirtual && method.HasInstantiation) return true; } return false; } /// <summary> /// Wrapper helper function around the IsCanonicalDefinitionType API on the TypeSystemContext /// </summary> public static bool IsCanonicalDefinitionType(this TypeDesc type, CanonicalFormKind kind) { return type.Context.IsCanonicalDefinitionType(type, kind); } /// <summary> /// Gets the value of the field ordinal. Ordinals are computed by also including static fields, but excluding /// literal fields and fields with RVAs. /// </summary> public static int GetFieldOrdinal(this FieldDesc inputField) { // Make sure we are asking the question for a valid instance or static field Debug.Assert(!inputField.HasRva && !inputField.IsLiteral); int fieldOrdinal = 0; foreach (FieldDesc field in inputField.OwningType.GetFields()) { // If this field does not contribute to layout, skip if (field.HasRva || field.IsLiteral) continue; if (field == inputField) return fieldOrdinal; fieldOrdinal++; } Debug.Assert(false); return -1; } /// <summary> /// What is the maximum number of steps that need to be taken from this type to its most contained generic type. /// i.e. /// System.Int32 => 0 /// List&lt;System.Int32&gt; => 1 /// Dictionary&lt;System.Int32,System.Int32&gt; => 1 /// Dictionary&lt;List&lt;System.Int32&gt;,System.Int32&gt; => 2 /// </summary> public static int GetGenericDepth(this TypeDesc type) { if (type.HasInstantiation) { int maxGenericDepthInInstantiation = 0; foreach (TypeDesc instantiationType in type.Instantiation) { maxGenericDepthInInstantiation = Math.Max(instantiationType.GetGenericDepth(), maxGenericDepthInInstantiation); } return maxGenericDepthInInstantiation + 1; } return 0; } /// <summary> /// Determine if a type has a generic depth greater than a given value /// </summary> public static bool IsGenericDepthGreaterThan(this TypeDesc type, int depth) { if (depth < 0) return true; foreach (TypeDesc instantiationType in type.Instantiation) { if (instantiationType.IsGenericDepthGreaterThan(depth - 1)) return true; } return false; } /// <summary> /// What is the maximum number of steps that need to be taken from this method to its most contained generic type. /// i.e. /// SomeGenericType&lt;System.Int32&gt;.Method&lt;System.Int32&gt; => 1 /// SomeType.Method&lt;System.Int32&gt; => 0 /// SomeType.Method&lt;List&lt;System.Int32&gt;&gt; => 1 /// </summary> public static int GetGenericDepth(this MethodDesc method) { int genericDepth = method.OwningType.GetGenericDepth(); foreach (TypeDesc type in method.Instantiation) { genericDepth = Math.Max(genericDepth, type.GetGenericDepth()); } return genericDepth; } /// <summary> /// Determine if a method has a generic depth greater than a given value /// </summary> /// <param name="depth"></param> /// <returns></returns> public static bool IsGenericDepthGreaterThan(this MethodDesc method, int depth) { if (method.OwningType.IsGenericDepthGreaterThan(depth)) return true; foreach (TypeDesc type in method.Instantiation) { if (type.IsGenericDepthGreaterThan(depth)) return true; } return false; } /// <summary> /// Determines whether an array type does not implement the generic collection interfaces. This is the case /// for multi-dimensional arrays, and arrays of pointers.
/// </summary> public static bool IsArrayTypeWithoutGenericInterfaces(this TypeDesc type) { if (!type.IsArray) return false; var arrayType = (ArrayType)type; TypeDesc elementType = arrayType.ElementType; return type.IsMdArray || elementType.IsPointer || elementType.IsFunctionPointer; } public static TypeDesc MergeTypesToCommonParent(TypeDesc ta, TypeDesc tb) { if (ta == tb) { return ta; } // Handle the array case if (ta.IsArray) { if (tb.IsArray) { return MergeArrayTypesToCommonParent((ArrayType)ta, (ArrayType)tb); } else if (tb.IsInterface) { // Check to see if we can merge the array to a common interface (such as Derived[] and IList<Base>) if (ta.CanCastTo(tb)) { return tb; } } // keep merging from here ta = ta.Context.GetWellKnownType(WellKnownType.Array); } else if (tb.IsArray) { if (ta.IsInterface && tb.CanCastTo(ta)) { return ta; } tb = tb.Context.GetWellKnownType(WellKnownType.Array); } Debug.Assert(ta.IsDefType); Debug.Assert(tb.IsDefType); if (tb.IsInterface) { if (ta.IsInterface) { // // Both types are interfaces. Check whether one // interface extends the other. // // Does tb extend ta ? // if (tb.ImplementsEquivalentInterface(ta)) { return ta; } // // Does ta extend tb ? // if (ta.ImplementsEquivalentInterface(tb)) { return tb; } // No compatible merge found - using Object return ta.Context.GetWellKnownType(WellKnownType.Object); } else { return MergeClassWithInterface(ta, tb); } } else if (ta.IsInterface) { return MergeClassWithInterface(tb, ta); } int aDepth = 0; int bDepth = 0; // find the depth in the class hierarchy for each class for (TypeDesc searchType = ta; searchType != null; searchType = searchType.BaseType) { aDepth++; } for (TypeDesc searchType = tb; searchType != null; searchType = searchType.BaseType) { bDepth++; } // for whichever class is lower down in the hierarchy, walk up the superclass chain // to the same level as the other class while (aDepth > bDepth) { ta = ta.BaseType; aDepth--; } while (bDepth > aDepth) { tb = tb.BaseType; bDepth--; } while (ta != tb) { ta = ta.BaseType; tb = tb.BaseType; } // If no compatible merge is found, we end up using Object Debug.Assert(ta != null); return ta; } private static TypeDesc MergeArrayTypesToCommonParent(ArrayType ta, ArrayType tb) { Debug.Assert(ta.IsArray && tb.IsArray && ta != tb); // if the ranks don't match, the common ancestor is System.Array if (ta.IsSzArray != tb.IsSzArray || ta.Rank != tb.Rank) { return ta.Context.GetWellKnownType(WellKnownType.Array); } TypeDesc taElem = ta.ElementType; TypeDesc tbElem = tb.ElementType; Debug.Assert(taElem != tbElem); TypeDesc mergeElem; if (taElem.IsArray && tbElem.IsArray) { mergeElem = MergeArrayTypesToCommonParent((ArrayType)taElem, (ArrayType)tbElem); } else if (taElem.IsGCPointer && tbElem.IsGCPointer) { // Find the common ancestor of the element types. mergeElem = MergeTypesToCommonParent(taElem, tbElem); } else { // The element types have nothing in common.
return ta.Context.GetWellKnownType(WellKnownType.Array); } if (mergeElem == taElem) { return ta; } if (mergeElem == tbElem) { return tb; } if (taElem.IsMdArray) { return mergeElem.MakeArrayType(ta.Rank); } return mergeElem.MakeArrayType(); } private static bool ImplementsEquivalentInterface(this TypeDesc type, TypeDesc interfaceType) { foreach (DefType implementedInterface in type.RuntimeInterfaces) { if (implementedInterface == interfaceType) { return true; } } return false; } /// <summary> /// Attempts to resolve constrained call to <paramref name="interfaceMethod"/> into a concrete non-unboxing /// method on <paramref name="constrainedType"/>. /// The ability to resolve constraint methods is affected by the degree of code sharing we are performing /// for generic code. /// </summary> /// <returns>The resolved method or null if the constraint couldn't be resolved.</returns> public static MethodDesc TryResolveConstraintMethodApprox(this TypeDesc constrainedType, TypeDesc interfaceType, MethodDesc interfaceMethod, out bool forceRuntimeLookup) { forceRuntimeLookup = false; bool isStaticVirtualMethod = interfaceMethod.Signature.IsStatic; // We can't resolve constraint calls effectively for reference types, and there's // not a lot of perf. benefit in doing it anyway. if (!constrainedType.IsValueType && (!isStaticVirtualMethod || constrainedType.IsCanonicalDefinitionType(CanonicalFormKind.Any))) { return null; } // Interface method may or may not be fully canonicalized here. // It would be canonical on the CoreCLR side so canonicalize here to keep the algorithms similar. Instantiation methodInstantiation = interfaceMethod.Instantiation; interfaceMethod = interfaceMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // 1. Find the (possibly generic) method that would implement the // constraint if we were making a call on a boxed value type. TypeDesc canonType = constrainedType.ConvertToCanonForm(CanonicalFormKind.Specific); TypeSystemContext context = constrainedType.Context; MethodDesc genInterfaceMethod = interfaceMethod.GetMethodDefinition(); MethodDesc method = null; if (genInterfaceMethod.OwningType.IsInterface) { // Sometimes (when compiling shared generic code) // we don't have enough exact type information at JIT time // even to decide whether we will be able to resolve to an unboxed entry point... 
// To cope with this case we always go via the helper function if there's any // chance of this happening by checking for all interfaces which might possibly // be compatible with the call (verification will have ensured that // at least one of them will be) // Enumerate all potential interface instantiations int potentialMatchingInterfaces = 0; foreach (DefType potentialInterfaceType in canonType.RuntimeInterfaces) { if (potentialInterfaceType.ConvertToCanonForm(CanonicalFormKind.Specific) == interfaceType.ConvertToCanonForm(CanonicalFormKind.Specific)) { potentialMatchingInterfaces++; MethodDesc potentialInterfaceMethod = genInterfaceMethod; if (potentialInterfaceMethod.OwningType != potentialInterfaceType) { potentialInterfaceMethod = context.GetMethodForInstantiatedType( potentialInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)potentialInterfaceType); } if (isStaticVirtualMethod) { method = canonType.ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(potentialInterfaceMethod); } else { method = canonType.ResolveInterfaceMethodToVirtualMethodOnType(potentialInterfaceMethod); } // See code:#TryResolveConstraintMethodApprox_DoNotReturnParentMethod if (!isStaticVirtualMethod && method != null && !method.OwningType.IsValueType) { // We explicitly wouldn't want to abort if we found a default implementation. // The above resolution doesn't consider the default methods. Debug.Assert(!method.OwningType.IsInterface); return null; } } } Debug.Assert(potentialMatchingInterfaces != 0); if (potentialMatchingInterfaces > 1) { // We have more potentially matching interfaces Debug.Assert(interfaceType.HasInstantiation); bool isExactMethodResolved = false; if (!interfaceType.IsCanonicalSubtype(CanonicalFormKind.Any) && !interfaceType.IsGenericDefinition && !constrainedType.IsCanonicalSubtype(CanonicalFormKind.Any) && !constrainedType.IsGenericDefinition) { // We have exact interface and type instantiations (no generic variables and __Canon used // anywhere) if (constrainedType.CanCastTo(interfaceType)) { // We can resolve to exact method MethodDesc exactInterfaceMethod = context.GetMethodForInstantiatedType( genInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceType); if (isStaticVirtualMethod) { method = constrainedType.ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(exactInterfaceMethod); } else { method = constrainedType.ResolveVariantInterfaceMethodToVirtualMethodOnType(exactInterfaceMethod); } isExactMethodResolved = method != null; } } if (!isExactMethodResolved) { // We couldn't resolve the interface statically // Notify the caller that it should use runtime lookup // Note that we can leave pMD incorrect, because we will use runtime lookup forceRuntimeLookup = true; } } else { // If we can resolve the interface exactly then do so (e.g. when doing the exact // lookup at runtime, or when not sharing generic code). 
if (constrainedType.CanCastTo(interfaceType)) { MethodDesc exactInterfaceMethod = genInterfaceMethod; if (genInterfaceMethod.OwningType != interfaceType) exactInterfaceMethod = context.GetMethodForInstantiatedType( genInterfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceType); if (isStaticVirtualMethod) { method = constrainedType.ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(exactInterfaceMethod); } else { method = constrainedType.ResolveVariantInterfaceMethodToVirtualMethodOnType(exactInterfaceMethod); } } } } else if (genInterfaceMethod.IsVirtual) { MethodDesc targetMethod = interfaceType.FindMethodOnTypeWithMatchingTypicalMethod(genInterfaceMethod); method = constrainedType.FindVirtualFunctionTargetMethodOnObjectType(targetMethod); } else { // The method will be null if calling a non-virtual instance // methods on System.Object, i.e. when these are used as a constraint. method = null; } if (method == null) { // Fall back to VSD return null; } //#TryResolveConstraintMethodApprox_DoNotReturnParentMethod // Only return a method if the value type itself declares the method, // otherwise we might get a method from Object or System.ValueType if (!isStaticVirtualMethod && !method.OwningType.IsValueType) { // Fall back to VSD return null; } // We've resolved the method, ignoring its generic method arguments // If the method is a generic method then go and get the instantiated descriptor if (methodInstantiation.Length != 0) { method = method.MakeInstantiatedMethod(methodInstantiation); } Debug.Assert(method != null); return method; } private static TypeDesc MergeClassWithInterface(TypeDesc type, TypeDesc interfaceType) { // Check if the class implements the interface if (type.ImplementsEquivalentInterface(interfaceType)) { return interfaceType; } // Check if the class and the interface implement a common interface foreach (var potentialCommonInterface in interfaceType.RuntimeInterfaces) { if (type.ImplementsEquivalentInterface(potentialCommonInterface)) { // Found a common interface. If there are multiple common interfaces, then // the problem is ambiguous so we'll just take the first one--it's the best // we can do. return potentialCommonInterface; } } // No compatible merge found - using Object return type.Context.GetWellKnownType(WellKnownType.Object); } /// <summary> /// Normalizes canonical instantiations (converts Foo&lt;object, __Canon&gt; to /// Foo&lt;__Canon, __Canon>). Returns identity for non-canonical types. 
/// </summary> public static TypeDesc NormalizeInstantiation(this TypeDesc thisType) { if (thisType.IsCanonicalSubtype(CanonicalFormKind.Any)) return thisType.ConvertToCanonForm(CanonicalFormKind.Specific); return thisType; } public static Instantiation GetInstantiationThatMeetsConstraints(Instantiation inst, bool allowCanon) { TypeDesc[] resultArray = new TypeDesc[inst.Length]; for (int i = 0; i < inst.Length; i++) { TypeDesc instArg = GetTypeThatMeetsConstraints((GenericParameterDesc)inst[i], allowCanon); if (instArg == null) return default(Instantiation); resultArray[i] = instArg; } return new Instantiation(resultArray); } private static TypeDesc GetTypeThatMeetsConstraints(GenericParameterDesc genericParam, bool allowCanon) { TypeSystemContext context = genericParam.Context; // Universal canon is the best option if it's supported if (allowCanon && context.SupportsUniversalCanon) return context.UniversalCanonType; // Not nullable type is the only thing where we can't substitute reference types GenericConstraints constraints = genericParam.Constraints; if ((constraints & GenericConstraints.NotNullableValueTypeConstraint) != 0) return null; // If canon is allowed, we can use that if (allowCanon && context.SupportsCanon) { foreach (var c in genericParam.TypeConstraints) { // Could be e.g. "where T : U" // We could try to dig into the U and solve it, but that just opens us up to // recursion and it's just not worth it. if (c.IsSignatureVariable) return null; if (!c.IsGCPointer) return null; } return genericParam.Context.CanonType; } // If canon is not allowed, we're limited in our choices. TypeDesc constrainedType = null; foreach (var c in genericParam.TypeConstraints) { // Can't do multiple constraints if (constrainedType != null) return null; // Could be e.g. "where T : IFoo<U>" or "where T : U" if (c.ContainsSignatureVariables()) return null; constrainedType = c; } return constrainedType ?? genericParam.Context.GetWellKnownType(WellKnownType.Object); } public static bool ContainsSignatureVariables(this Instantiation instantiation, bool treatGenericParameterLikeSignatureVariable = false) { foreach (var arg in instantiation) { if (arg.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; } return false; } /// <summary> /// Return true when the type in question is marked with the NonVersionable attribute. /// </summary> /// <param name="type">Type to check</param> /// <returns>True when the type is marked with the non-versionable custom attribute, false otherwise.</returns> public static bool IsNonVersionable(this MetadataType type) { return type.HasCustomAttribute("System.Runtime.Versioning", "NonVersionableAttribute"); } /// <summary> /// Return true when the method is marked as non-versionable. Non-versionable methods /// may be freely inlined into ReadyToRun images even when they don't reside in the /// same version bubble as the module being compiled. /// </summary> /// <param name="method">Method to check</param> /// <returns>True when the method is marked as non-versionable, false otherwise.</returns> public static bool IsNonVersionable(this MethodDesc method) { return method.HasCustomAttribute("System.Runtime.Versioning", "NonVersionableAttribute"); } /// <summary> /// Returns true if <paramref name="method"/> is an actual native entrypoint. /// There's a distinction between when a method reports it's a PInvoke in the metadata /// versus how it's treated in the compiler. For many PInvoke methods the compiler will generate /// an IL body. 
The methods with an IL method body shouldn't be treated as PInvoke within the compiler. /// </summary> public static bool IsRawPInvoke(this MethodDesc method) { return method.IsPInvoke && (method is Internal.IL.Stubs.PInvokeTargetNativeMethod); } public static bool IsDynamicInterfaceCastableImplementation(this MetadataType interfaceType) { Debug.Assert(interfaceType.IsInterface); return interfaceType.HasCustomAttribute("System.Runtime.InteropServices", "DynamicInterfaceCastableImplementationAttribute"); } } }
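The depth-equalizing walk inside MergeTypesToCommonParent above can also be demonstrated with plain reflection. The sketch below is a simplified, hypothetical analogue covering only the class-hierarchy branch (no interface or array handling), so it illustrates one path through the real algorithm rather than replacing it.

using System;
using System.IO;

internal static class MergeSketch
{
    private static int Depth(Type t)
    {
        int d = 0;
        for (Type s = t; s != null; s = s.BaseType) d++;
        return d;
    }

    internal static Type MergeClasses(Type ta, Type tb)
    {
        // Walk the deeper type up to the same level, then step both up in lockstep.
        int aDepth = Depth(ta), bDepth = Depth(tb);
        while (aDepth > bDepth) { ta = ta.BaseType; aDepth--; }
        while (bDepth > aDepth) { tb = tb.BaseType; bDepth--; }
        while (ta != tb) { ta = ta.BaseType; tb = tb.BaseType; }
        return ta;
    }

    internal static void Main()
    {
        Console.WriteLine(MergeClasses(typeof(MemoryStream), typeof(FileStream))); // System.IO.Stream
        Console.WriteLine(MergeClasses(typeof(string), typeof(Uri)));              // System.Object
    }
}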
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; namespace Internal.TypeSystem { public class MetadataVirtualMethodAlgorithm : VirtualMethodAlgorithm { private class MethodDescHashtable : LockFreeReaderHashtable<MethodDesc, MethodDesc> { protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodDesc value) { return value.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodDesc value) { Debug.Assert(key.Context == value.Context); return object.ReferenceEquals(key, value); } protected override bool CompareValueToValue(MethodDesc value1, MethodDesc value2) { Debug.Assert(value1.Context == value2.Context); return object.ReferenceEquals(value1, value2); } protected override MethodDesc CreateValueFromKey(MethodDesc key) { return key; } } private class UnificationGroup { private MethodDesc[] _members = MethodDesc.EmptyMethods; private int _memberCount; private MethodDesc[] _methodsRequiringSlotUnification = MethodDesc.EmptyMethods; private int _methodsRequiringSlotUnificationCount; /// <summary> /// Custom enumerator struct for Unification group. Makes enumeration require 0 allocations. /// </summary> public struct Enumerator { private MethodDesc[] _arrayToEnumerate; private int _index; private MethodDesc _current; internal Enumerator(MethodDesc[] arrayToEnumerate) { _arrayToEnumerate = arrayToEnumerate; _index = 0; _current = default(MethodDesc); } public bool MoveNext() { for (; _index < _arrayToEnumerate.Length; _index++) { if (_arrayToEnumerate[_index] != null) { _current = _arrayToEnumerate[_index]; _index++; return true; } } _current = default(MethodDesc); return false; } public MethodDesc Current { get { return _current; } } } public struct Enumerable { private readonly MethodDesc[] _arrayToEnumerate; public Enumerable(MethodDesc[] arrayToEnumerate) { _arrayToEnumerate = arrayToEnumerate; } public Enumerator GetEnumerator() { return new Enumerator(_arrayToEnumerate); } } public UnificationGroup(MethodDesc definingMethod) { DefiningMethod = definingMethod; // TODO! 
Add assertion that DefiningMethod is a slot defining method } public MethodDesc DefiningMethod; public Enumerable Members => new Enumerable(_members); public Enumerable MethodsRequiringSlotUnification => new Enumerable(_methodsRequiringSlotUnification); public void AddMethodRequiringSlotUnification(MethodDesc method) { if (RequiresSlotUnification(method)) return; _methodsRequiringSlotUnificationCount++; if (_methodsRequiringSlotUnificationCount >= _methodsRequiringSlotUnification.Length) { Array.Resize(ref _methodsRequiringSlotUnification, Math.Max(_methodsRequiringSlotUnification.Length * 2, 2)); } _methodsRequiringSlotUnification[_methodsRequiringSlotUnificationCount - 1] = method; } public bool RequiresSlotUnification(MethodDesc method) { for (int i = 0; i < _methodsRequiringSlotUnificationCount; i++) { if (_methodsRequiringSlotUnification[i] == method) return true; } return false; } public void SetDefiningMethod(MethodDesc newDefiningMethod) { // Do not change the defining method if its the same as // one of the members, or it isn't a change at all if (!IsInGroup(newDefiningMethod) && DefiningMethod != newDefiningMethod) { // When we set the defining method, ensure that the old defining method isn't removed from the group MethodDesc oldDefiningMethod = DefiningMethod; DefiningMethod = newDefiningMethod; AddToGroup(oldDefiningMethod); // TODO! Add assertion that DefiningMethod is a slot defining method } } public void AddToGroup(MethodDesc method) { if (method == DefiningMethod) return; if (!IsInGroup(method)) { _memberCount++; if (_memberCount >= _members.Length) { Array.Resize(ref _members, Math.Max(_members.Length * 2, 2)); } for (int i = 0; i < _members.Length; i++) { if (_members[i] == null) { _members[i] = method; break; } } } } public void RemoveFromGroup(MethodDesc method) { if (method == DefiningMethod) throw new BadImageFormatException(); for (int i = 0; i < _members.Length; i++) { if (_members[i] == method) { _memberCount--; _members[i] = null; return; } } } public bool IsInGroupOrIsDefiningSlot(MethodDesc method) { if (DefiningMethod == method) return true; return IsInGroup(method); } public bool IsInGroup(MethodDesc method) { for (int i = 0; i < _members.Length; i++) { if (_members[i] == method) return true; } return false; } } public override MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, TypeDesc objectType) { return FindVirtualFunctionTargetMethodOnObjectType(targetMethod, (MetadataType)objectType); } /// <summary> /// Resolve a virtual function call (to a virtual method, not an interface method) /// </summary> /// <param name="targetMethod"></param> /// <param name="objectType"></param> /// <returns>The override of the virtual method that should be called</returns> private static MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, MetadataType objectType) { // Step 1, convert objectType to uninstantiated form MetadataType uninstantiatedType = objectType; MethodDesc initialTargetMethod = targetMethod; InstantiatedType initialInstantiatedType = objectType as InstantiatedType; if (initialInstantiatedType != null) { uninstantiatedType = (MetadataType)initialInstantiatedType.GetTypeDefinition(); } // Step 2, convert targetMethod to method in type hierarchy of uninstantiated form targetMethod = targetMethod.GetMethodDefinition(); if (uninstantiatedType != objectType) { targetMethod = uninstantiatedType.FindMethodOnTypeWithMatchingTypicalMethod(targetMethod); } // Step 3, find unification group of target method 
UnificationGroup group = new UnificationGroup(FindSlotDefiningMethodForVirtualMethod(targetMethod)); FindBaseUnificationGroup(uninstantiatedType, group); // Step 4, name/sig match virtual function resolve MethodDesc resolutionTarget = FindNameSigOverrideForVirtualMethod(group.DefiningMethod, uninstantiatedType); if (resolutionTarget == null) return null; // Step 5, convert resolution target from uninstantiated form target to objecttype target, // and instantiate as appropriate if (uninstantiatedType != objectType) { resolutionTarget = objectType.FindMethodOnTypeWithMatchingTypicalMethod(resolutionTarget); } if (initialTargetMethod.HasInstantiation) { resolutionTarget = resolutionTarget.MakeInstantiatedMethod(initialTargetMethod.Instantiation); } return resolutionTarget; } private static bool IsInterfaceImplementedOnType(MetadataType type, MetadataType interfaceType) { foreach (TypeDesc iface in type.RuntimeInterfaces) { if (iface == interfaceType) return true; } return false; } private static MethodDesc FindImplFromDeclFromMethodImpls(MetadataType type, MethodDesc decl) { MethodImplRecord[] foundMethodImpls = type.FindMethodsImplWithMatchingDeclName(decl.Name); if (foundMethodImpls == null) return null; bool interfaceDecl = decl.OwningType.IsInterface; foreach (MethodImplRecord record in foundMethodImpls) { MethodDesc recordDecl = record.Decl; if (interfaceDecl != recordDecl.OwningType.IsInterface) continue; if (!interfaceDecl) recordDecl = FindSlotDefiningMethodForVirtualMethod(recordDecl); if (recordDecl == decl) { return FindSlotDefiningMethodForVirtualMethod(record.Body); } } return null; } private static bool IsInterfaceExplicitlyImplementedOnType(MetadataType type, MetadataType interfaceType) { foreach (TypeDesc iface in type.ExplicitlyImplementedInterfaces) { if (iface == interfaceType) return true; } return false; } /// <summary> /// Find matching a matching method by name and sig on a type. (Restricted to virtual methods only) /// </summary> /// <param name="targetMethod"></param> /// <param name="currentType"></param> /// <param name="reverseMethodSearch">Used to control the order of the search. For historical purposes to /// match .NET Framework behavior, this is typically true, but not always. There is no particular rationale /// for the particular orders other than to attempt to be consistent in virtual method override behavior /// betweeen runtimes.</param> /// <param name="nameSigMatchMethodIsValidCandidate"></param> /// <returns></returns> private static MethodDesc FindMatchingVirtualMethodOnTypeByNameAndSig(MethodDesc targetMethod, DefType currentType, bool reverseMethodSearch, Func<MethodDesc, MethodDesc, bool> nameSigMatchMethodIsValidCandidate) { string name = targetMethod.Name; MethodSignature sig = targetMethod.Signature; MethodDesc implMethod = null; foreach (MethodDesc candidate in currentType.GetAllVirtualMethods()) { if (candidate.Name == name) { if (candidate.Signature.Equals(sig)) { if (nameSigMatchMethodIsValidCandidate == null || nameSigMatchMethodIsValidCandidate(targetMethod, candidate)) { implMethod = candidate; // If reverseMethodSearch is enabled, we want to find the last match on this type, not the first // (reverseMethodSearch is used for most matches except for searches for name/sig method matches for interface methods on the most derived type) if (!reverseMethodSearch) return implMethod; } } } } return implMethod; } // This function is used to find the name/sig based override for a given method. 
This method ignores all // method impl's as it assumes they have been resolved. The algorithm is simple. Walk to the base type looking // for overrides by name and signature. If one is found, return it as long as the newslot defining method // for the found method matches that of the target method. private static MethodDesc FindNameSigOverrideForVirtualMethod(MethodDesc targetMethod, MetadataType currentType) { while (currentType != null) { MethodDesc nameSigOverride = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(targetMethod, currentType, reverseMethodSearch:true); if (nameSigOverride != null) { return nameSigOverride; } currentType = currentType.MetadataBaseType; } return null; } // This function looks for the base type method that defines the slot for a method // This is either the newslot method most derived that is in the parent hierarchy of method // or the least derived method that isn't newslot that matches by name and sig. public static MethodDesc FindSlotDefiningMethodForVirtualMethod(MethodDesc method) { if (method == null) return method; DefType currentType = method.OwningType.BaseType; // Loop until a newslot method is found while ((currentType != null) && !method.IsNewSlot) { MethodDesc foundMethod = FindMatchingVirtualMethodOnTypeByNameAndSig(method, currentType, reverseMethodSearch: true, nameSigMatchMethodIsValidCandidate:null); if (foundMethod != null) { method = foundMethod; } currentType = currentType.BaseType; } // Newslot method found, or if not the least derived method that matches by name and // sig is to be returned. return method; } /// <summary> /// Find a matching method by name and sig on a type. (Restricted to virtual methods only) Only search amongst methods with the same vtable slot. /// </summary> /// <param name="method"></param> /// <param name="currentType"></param> /// <param name="reverseMethodSearch">Used to control the order of the search. For historical purposes to /// match .NET Framework behavior, this is typically true, but not always. 
There is no particular rationale /// for the particular orders other than to attempt to be consistent in virtual method override behavior /// between runtimes.</param> /// <returns></returns> private static MethodDesc FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(MethodDesc method, DefType currentType, bool reverseMethodSearch) { return FindMatchingVirtualMethodOnTypeByNameAndSig(method, currentType, reverseMethodSearch, nameSigMatchMethodIsValidCandidate: s_VerifyMethodsHaveTheSameVirtualSlot); } private static Func<MethodDesc, MethodDesc, bool> s_VerifyMethodsHaveTheSameVirtualSlot = VerifyMethodsHaveTheSameVirtualSlot; // Return true if the slot that defines methodToVerify matches slotDefiningMethod private static bool VerifyMethodsHaveTheSameVirtualSlot(MethodDesc slotDefiningMethod, MethodDesc methodToVerify) { MethodDesc slotDefiningMethodOfMethodToVerify = FindSlotDefiningMethodForVirtualMethod(methodToVerify); return slotDefiningMethodOfMethodToVerify == slotDefiningMethod; } private static void FindBaseUnificationGroup(MetadataType currentType, UnificationGroup unificationGroup) { MethodDesc originalDefiningMethod = unificationGroup.DefiningMethod; MethodDesc methodImpl = FindImplFromDeclFromMethodImpls(currentType, unificationGroup.DefiningMethod); if (methodImpl != null) { if (methodImpl.RequiresSlotUnification()) { unificationGroup.AddMethodRequiringSlotUnification(unificationGroup.DefiningMethod); unificationGroup.AddMethodRequiringSlotUnification(methodImpl); } unificationGroup.SetDefiningMethod(methodImpl); } MethodDesc nameSigMatchMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(unificationGroup.DefiningMethod, currentType, reverseMethodSearch: true); MetadataType baseType = currentType.MetadataBaseType; // Unless the current type has a name/sig match for the group, look to the base type to define the unification group further if ((nameSigMatchMethod == null) && (baseType != null)) { FindBaseUnificationGroup(baseType, unificationGroup); } Debug.Assert(unificationGroup.IsInGroupOrIsDefiningSlot(originalDefiningMethod)); // Now, we have the unification group from the type, or have discovered it's defined on the current type. 
// Adjust the group to contain all of the elements that are added to it on this type, remove the components that // have separated themselves from the group // Start with removing methods that separated themselves from the group via name/sig matches MethodDescHashtable separatedMethods = null; foreach (MethodDesc memberMethod in unificationGroup.Members) { // If a method is both overriden via MethodImpl and name/sig, we don't remove it from the unification list // as the local MethodImpl takes priority over the name/sig match, and prevents the slot disunificaiton if (FindSlotDefiningMethodForVirtualMethod(memberMethod) == FindSlotDefiningMethodForVirtualMethod(originalDefiningMethod)) continue; MethodDesc nameSigMatchMemberMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(memberMethod, currentType, reverseMethodSearch: true); if (nameSigMatchMemberMethod != null && nameSigMatchMemberMethod != memberMethod) { if (separatedMethods == null) separatedMethods = new MethodDescHashtable(); separatedMethods.AddOrGetExisting(memberMethod); } } if (separatedMethods != null) { foreach (MethodDesc separatedMethod in MethodDescHashtable.Enumerator.Get(separatedMethods)) { unificationGroup.RemoveFromGroup(separatedMethod); } } // Next find members which have separated or added themselves to the group via MethodImpls foreach (MethodImplRecord methodImplRecord in currentType.VirtualMethodImplsForType) { MethodDesc declSlot = FindSlotDefiningMethodForVirtualMethod(methodImplRecord.Decl); MethodDesc implSlot = FindSlotDefiningMethodForVirtualMethod(methodImplRecord.Body); if (unificationGroup.IsInGroup(declSlot) && !unificationGroup.IsInGroupOrIsDefiningSlot(implSlot)) { unificationGroup.RemoveFromGroup(declSlot); if (separatedMethods == null) separatedMethods = new MethodDescHashtable(); separatedMethods.AddOrGetExisting(declSlot); if (unificationGroup.RequiresSlotUnification(declSlot) || implSlot.RequiresSlotUnification()) { if (implSlot.Signature.EqualsWithCovariantReturnType(unificationGroup.DefiningMethod.Signature)) { unificationGroup.AddMethodRequiringSlotUnification(declSlot); unificationGroup.AddMethodRequiringSlotUnification(implSlot); unificationGroup.SetDefiningMethod(implSlot); } } continue; } if (!unificationGroup.IsInGroupOrIsDefiningSlot(declSlot)) { if (unificationGroup.IsInGroupOrIsDefiningSlot(implSlot)) { // Add decl to group. // To do so, we need to have the Unification Group of the decl slot, as it may have multiple members itself UnificationGroup addDeclGroup = new UnificationGroup(declSlot); FindBaseUnificationGroup(baseType, addDeclGroup); Debug.Assert( addDeclGroup.IsInGroupOrIsDefiningSlot(declSlot) || (addDeclGroup.RequiresSlotUnification(declSlot) && addDeclGroup.DefiningMethod.Signature.EqualsWithCovariantReturnType(declSlot.Signature))); foreach (MethodDesc methodImplRequiredToRemainInEffect in addDeclGroup.MethodsRequiringSlotUnification) { unificationGroup.AddMethodRequiringSlotUnification(methodImplRequiredToRemainInEffect); } // Add all members from the decl's unification group except for ones that have been seperated by name/sig matches // or previously processed methodimpls. NOTE: This implies that method impls are order dependent. 
if (separatedMethods == null || !separatedMethods.Contains(addDeclGroup.DefiningMethod)) { unificationGroup.AddToGroup(addDeclGroup.DefiningMethod); } foreach (MethodDesc addDeclGroupMemberMethod in addDeclGroup.Members) { if (separatedMethods == null || !separatedMethods.Contains(addDeclGroupMemberMethod)) { unificationGroup.AddToGroup(addDeclGroupMemberMethod); } } if (unificationGroup.RequiresSlotUnification(declSlot)) { unificationGroup.AddMethodRequiringSlotUnification(implSlot); } else if (implSlot == unificationGroup.DefiningMethod && implSlot.RequiresSlotUnification()) { unificationGroup.AddMethodRequiringSlotUnification(declSlot); unificationGroup.AddMethodRequiringSlotUnification(implSlot); } } else if (unificationGroup.RequiresSlotUnification(declSlot)) { if (implSlot.Signature.EqualsWithCovariantReturnType(unificationGroup.DefiningMethod.Signature)) { unificationGroup.AddMethodRequiringSlotUnification(implSlot); unificationGroup.SetDefiningMethod(implSlot); } } } } } public override MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } public override MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } //////////////////////// INTERFACE RESOLUTION //Interface function resolution // Interface function resolution follows the following rules // 1. Apply any method impl that may exist, if once of these exists, resolve to target immediately. // 2. If an interface is explicitly defined on a type, then attempt to perform a namesig match on the // current type to resolve.If the interface isn't resolved, if it isn't implemented on a base type, // scan all base types for name / sig matches. // 3. If implicitly defined, attempt to perform a namesig match if the interface method implementation // has not been found on some base type. // The above will resolve an interface to a virtual method slot. From there perform virtual resolution // to find out the actual target.Note, to preserve correct behavior in the presence of variance, this // function returns null if the interface method implementation is not defined by the current type in // the hierarchy.For variance to work correctly, this requires that interfaces be queried in correct order. // See current interface call resolution for details on how that happens. private static MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { if (currentType.IsInterface) return null; MethodDesc methodImpl = FindImplFromDeclFromMethodImpls(currentType, interfaceMethod); if (methodImpl != null) return methodImpl; MetadataType interfaceType = (MetadataType)interfaceMethod.OwningType; // If interface is explicitly defined on a type, search for a name/sig match. 
bool foundExplicitInterface = IsInterfaceExplicitlyImplementedOnType(currentType, interfaceType); MetadataType baseType = currentType.MetadataBaseType; if (foundExplicitInterface) { MethodDesc foundOnCurrentType = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: false, /* When searching for name/sig overrides on a type that explicitly defines an interface, search through the type in the forward direction*/ nameSigMatchMethodIsValidCandidate :null); foundOnCurrentType = FindSlotDefiningMethodForVirtualMethod(foundOnCurrentType); if (baseType == null) return foundOnCurrentType; if (foundOnCurrentType == null && (ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, baseType) == null)) { // TODO! Does this handle the case where the base type explicitly implements the interface, but is abstract // and doesn't actually have an implementation? if (!IsInterfaceImplementedOnType(baseType, interfaceType)) { return FindNameSigOverrideForInterfaceMethodRecursive(interfaceMethod, baseType); } } return foundOnCurrentType; } else { // Implicit interface case if (!IsInterfaceImplementedOnType(currentType, interfaceType)) { // If the interface isn't implemented on this type at all, don't go searching return null; } // This is an implicitly implemented interface method. Only return a vlaue if this is the first type in the class // hierarchy that implements the interface. NOTE: If we pay attention to whether or not the parent type is // abstract or not, we may be able to be more efficient here, but let's skip that for now MethodDesc baseClassImplementationOfInterfaceMethod = ResolveInterfaceMethodToVirtualMethodOnTypeRecursive(interfaceMethod, baseType); if (baseClassImplementationOfInterfaceMethod != null) { return null; } else { MethodDesc foundOnCurrentType = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: false, /* When searching for name/sig overrides on a type that is the first type in the hierarchy to require the interface, search through the type in the forward direction*/ nameSigMatchMethodIsValidCandidate: null); foundOnCurrentType = FindSlotDefiningMethodForVirtualMethod(foundOnCurrentType); if (foundOnCurrentType != null) return foundOnCurrentType; return FindNameSigOverrideForInterfaceMethodRecursive(interfaceMethod, baseType); } } } public static MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { MetadataType interfaceType = (MetadataType)interfaceMethod.OwningType; bool foundInterface = IsInterfaceImplementedOnType(currentType, interfaceType); MethodDesc implMethod; if (foundInterface) { implMethod = ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, currentType); if (implMethod != null) return implMethod; } foreach (TypeDesc iface in currentType.RuntimeInterfaces) { if (iface.HasSameTypeDefinition(interfaceType) && iface.CanCastTo(interfaceType)) { implMethod = iface.FindMethodOnTypeWithMatchingTypicalMethod(interfaceMethod); Debug.Assert(implMethod != null); implMethod = ResolveInterfaceMethodToVirtualMethodOnType(implMethod, currentType); if (implMethod != null) return implMethod; } } return null; } // Helper routine used during implicit interface implementation discovery private static MethodDesc ResolveInterfaceMethodToVirtualMethodOnTypeRecursive(MethodDesc interfaceMethod, MetadataType currentType) { while (true) { if (currentType == null) return null; MetadataType interfaceType = 
(MetadataType)interfaceMethod.OwningType; if (!IsInterfaceImplementedOnType(currentType, interfaceType)) { // If the interface isn't implemented on this type at all, don't go searching return null; } MethodDesc currentTypeInterfaceResolution = ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, currentType); if (currentTypeInterfaceResolution != null) return currentTypeInterfaceResolution; currentType = currentType.MetadataBaseType; } } // Perform a name/sig match for a virtual method across the specified types and all of the types parents. private static MethodDesc FindNameSigOverrideForInterfaceMethodRecursive(MethodDesc interfaceMethod, MetadataType currentType) { while (true) { if (currentType == null) return null; MethodDesc nameSigOverride = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: true, /* When searching for a name sig match for an interface on parent types search in reverse order of declaration */ nameSigMatchMethodIsValidCandidate:null); if (nameSigOverride != null) { return FindSlotDefiningMethodForVirtualMethod(nameSigOverride); } currentType = currentType.MetadataBaseType; } } public override DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, TypeDesc currentType, out MethodDesc impl) { return ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethod, (MetadataType)currentType, out impl); } private static DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, MetadataType currentType, out MethodDesc impl) { TypeDesc interfaceMethodOwningType = interfaceMethod.OwningType; MetadataType mostSpecificInterface = null; bool diamondCase = false; impl = null; DefType[] consideredInterfaces; if (!currentType.IsInterface) { // If this is not an interface, only things on the interface list could provide // default implementations. consideredInterfaces = currentType.RuntimeInterfaces; } else { // If we're asking about an interface, include the interface in the list. consideredInterfaces = new DefType[currentType.RuntimeInterfaces.Length + 1]; Array.Copy(currentType.RuntimeInterfaces, consideredInterfaces, currentType.RuntimeInterfaces.Length); consideredInterfaces[consideredInterfaces.Length - 1] = (DefType)currentType.InstantiateAsOpen(); } foreach (MetadataType runtimeInterface in consideredInterfaces) { if (runtimeInterface == interfaceMethodOwningType) { // Also consider the default interface method implementation on the interface itself // if we don't have anything else yet if (mostSpecificInterface == null && !interfaceMethod.IsAbstract) { mostSpecificInterface = runtimeInterface; impl = interfaceMethod; } } else if (Array.IndexOf(runtimeInterface.RuntimeInterfaces, interfaceMethodOwningType) != -1) { // This interface might provide a default implementation MethodImplRecord[] possibleImpls = runtimeInterface.FindMethodsImplWithMatchingDeclName(interfaceMethod.Name); if (possibleImpls != null) { foreach (MethodImplRecord implRecord in possibleImpls) { if (implRecord.Decl == interfaceMethod) { // This interface provides a default implementation. // Is it also most specific? 
if (mostSpecificInterface == null || Array.IndexOf(runtimeInterface.RuntimeInterfaces, mostSpecificInterface) != -1) { mostSpecificInterface = runtimeInterface; impl = implRecord.Body; diamondCase = false; } else if (Array.IndexOf(mostSpecificInterface.RuntimeInterfaces, runtimeInterface) == -1) { diamondCase = true; } break; } } } } } if (diamondCase) { impl = null; return DefaultInterfaceMethodResolution.Diamond; } else if (impl == null) { return DefaultInterfaceMethodResolution.None; } else if (impl.IsAbstract) { return DefaultInterfaceMethodResolution.Reabstraction; } return DefaultInterfaceMethodResolution.DefaultImplementation; } public override IEnumerable<MethodDesc> ComputeAllVirtualSlots(TypeDesc type) { return EnumAllVirtualSlots((MetadataType)type); } // Enumerate all possible virtual slots of a type public static IEnumerable<MethodDesc> EnumAllVirtualSlots(MetadataType type) { MethodDescHashtable alreadyEnumerated = new MethodDescHashtable(); if (!type.IsInterface) { do { foreach (MethodDesc m in type.GetAllVirtualMethods()) { MethodDesc possibleVirtual = FindSlotDefiningMethodForVirtualMethod(m); if (!alreadyEnumerated.Contains(possibleVirtual)) { alreadyEnumerated.AddOrGetExisting(possibleVirtual); yield return possibleVirtual; } } type = type.MetadataBaseType; } while (type != null); } } } }
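The slot-defining walk in FindSlotDefiningMethodForVirtualMethod above is easiest to see against a tiny hierarchy. The sketch below uses invented types, not code from the repository.

using System;

class Base
{
    public virtual void M() => Console.WriteLine("Base.M");   // introduces the slot
}

class Mid : Base
{
    public override void M() => Console.WriteLine("Mid.M");   // reuses Base's slot
}

class Derived : Mid
{
    // 'new virtual' emits a newslot method, so the walk stops here instead of
    // continuing up to Base.M: Derived.M defines its own, separate slot.
    public new virtual void M() => Console.WriteLine("Derived.M");
}

static class Demo
{
    static void Main()
    {
        Base b = new Derived();
        b.M(); // prints "Mid.M": dispatch through Base's slot never sees Derived.M
    }
}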
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; namespace Internal.TypeSystem { public class MetadataVirtualMethodAlgorithm : VirtualMethodAlgorithm { private class MethodDescHashtable : LockFreeReaderHashtable<MethodDesc, MethodDesc> { protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodDesc value) { return value.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodDesc value) { Debug.Assert(key.Context == value.Context); return object.ReferenceEquals(key, value); } protected override bool CompareValueToValue(MethodDesc value1, MethodDesc value2) { Debug.Assert(value1.Context == value2.Context); return object.ReferenceEquals(value1, value2); } protected override MethodDesc CreateValueFromKey(MethodDesc key) { return key; } } private class UnificationGroup { private MethodDesc[] _members = MethodDesc.EmptyMethods; private int _memberCount; private MethodDesc[] _methodsRequiringSlotUnification = MethodDesc.EmptyMethods; private int _methodsRequiringSlotUnificationCount; /// <summary> /// Custom enumerator struct for Unification group. Makes enumeration require 0 allocations. /// </summary> public struct Enumerator { private MethodDesc[] _arrayToEnumerate; private int _index; private MethodDesc _current; internal Enumerator(MethodDesc[] arrayToEnumerate) { _arrayToEnumerate = arrayToEnumerate; _index = 0; _current = default(MethodDesc); } public bool MoveNext() { for (; _index < _arrayToEnumerate.Length; _index++) { if (_arrayToEnumerate[_index] != null) { _current = _arrayToEnumerate[_index]; _index++; return true; } } _current = default(MethodDesc); return false; } public MethodDesc Current { get { return _current; } } } public struct Enumerable { private readonly MethodDesc[] _arrayToEnumerate; public Enumerable(MethodDesc[] arrayToEnumerate) { _arrayToEnumerate = arrayToEnumerate; } public Enumerator GetEnumerator() { return new Enumerator(_arrayToEnumerate); } } public UnificationGroup(MethodDesc definingMethod) { DefiningMethod = definingMethod; // TODO! 
Add assertion that DefiningMethod is a slot defining method } public MethodDesc DefiningMethod; public Enumerable Members => new Enumerable(_members); public Enumerable MethodsRequiringSlotUnification => new Enumerable(_methodsRequiringSlotUnification); public void AddMethodRequiringSlotUnification(MethodDesc method) { if (RequiresSlotUnification(method)) return; _methodsRequiringSlotUnificationCount++; if (_methodsRequiringSlotUnificationCount >= _methodsRequiringSlotUnification.Length) { Array.Resize(ref _methodsRequiringSlotUnification, Math.Max(_methodsRequiringSlotUnification.Length * 2, 2)); } _methodsRequiringSlotUnification[_methodsRequiringSlotUnificationCount - 1] = method; } public bool RequiresSlotUnification(MethodDesc method) { for (int i = 0; i < _methodsRequiringSlotUnificationCount; i++) { if (_methodsRequiringSlotUnification[i] == method) return true; } return false; } public void SetDefiningMethod(MethodDesc newDefiningMethod) { // Do not change the defining method if its the same as // one of the members, or it isn't a change at all if (!IsInGroup(newDefiningMethod) && DefiningMethod != newDefiningMethod) { // When we set the defining method, ensure that the old defining method isn't removed from the group MethodDesc oldDefiningMethod = DefiningMethod; DefiningMethod = newDefiningMethod; AddToGroup(oldDefiningMethod); // TODO! Add assertion that DefiningMethod is a slot defining method } } public void AddToGroup(MethodDesc method) { if (method == DefiningMethod) return; if (!IsInGroup(method)) { _memberCount++; if (_memberCount >= _members.Length) { Array.Resize(ref _members, Math.Max(_members.Length * 2, 2)); } for (int i = 0; i < _members.Length; i++) { if (_members[i] == null) { _members[i] = method; break; } } } } public void RemoveFromGroup(MethodDesc method) { if (method == DefiningMethod) throw new BadImageFormatException(); for (int i = 0; i < _members.Length; i++) { if (_members[i] == method) { _memberCount--; _members[i] = null; return; } } } public bool IsInGroupOrIsDefiningSlot(MethodDesc method) { if (DefiningMethod == method) return true; return IsInGroup(method); } public bool IsInGroup(MethodDesc method) { for (int i = 0; i < _members.Length; i++) { if (_members[i] == method) return true; } return false; } } public override MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, TypeDesc objectType) { return FindVirtualFunctionTargetMethodOnObjectType(targetMethod, (MetadataType)objectType); } /// <summary> /// Resolve a virtual function call (to a virtual method, not an interface method) /// </summary> /// <param name="targetMethod"></param> /// <param name="objectType"></param> /// <returns>The override of the virtual method that should be called</returns> private static MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, MetadataType objectType) { // Step 1, convert objectType to uninstantiated form MetadataType uninstantiatedType = objectType; MethodDesc initialTargetMethod = targetMethod; InstantiatedType initialInstantiatedType = objectType as InstantiatedType; if (initialInstantiatedType != null) { uninstantiatedType = (MetadataType)initialInstantiatedType.GetTypeDefinition(); } // Step 2, convert targetMethod to method in type hierarchy of uninstantiated form targetMethod = targetMethod.GetMethodDefinition(); if (uninstantiatedType != objectType) { targetMethod = uninstantiatedType.FindMethodOnTypeWithMatchingTypicalMethod(targetMethod); } // Step 3, find unification group of target method 
UnificationGroup group = new UnificationGroup(FindSlotDefiningMethodForVirtualMethod(targetMethod)); FindBaseUnificationGroup(uninstantiatedType, group); // Step 4, name/sig match virtual function resolve MethodDesc resolutionTarget = FindNameSigOverrideForVirtualMethod(group.DefiningMethod, uninstantiatedType); if (resolutionTarget == null) return null; // Step 5, convert resolution target from uninstantiated form target to objecttype target, // and instantiate as appropriate if (uninstantiatedType != objectType) { resolutionTarget = objectType.FindMethodOnTypeWithMatchingTypicalMethod(resolutionTarget); } if (initialTargetMethod.HasInstantiation) { resolutionTarget = resolutionTarget.MakeInstantiatedMethod(initialTargetMethod.Instantiation); } return resolutionTarget; } private static bool IsInterfaceImplementedOnType(MetadataType type, MetadataType interfaceType) { foreach (TypeDesc iface in type.RuntimeInterfaces) { if (iface == interfaceType) return true; } return false; } private static MethodDesc FindImplFromDeclFromMethodImpls(MetadataType type, MethodDesc decl) { MethodImplRecord[] foundMethodImpls = type.FindMethodsImplWithMatchingDeclName(decl.Name); if (foundMethodImpls == null) return null; bool interfaceDecl = decl.OwningType.IsInterface; foreach (MethodImplRecord record in foundMethodImpls) { MethodDesc recordDecl = record.Decl; if (interfaceDecl != recordDecl.OwningType.IsInterface) continue; if (!interfaceDecl) recordDecl = FindSlotDefiningMethodForVirtualMethod(recordDecl); if (recordDecl == decl) { return FindSlotDefiningMethodForVirtualMethod(record.Body); } } return null; } private static bool IsInterfaceExplicitlyImplementedOnType(MetadataType type, MetadataType interfaceType) { foreach (TypeDesc iface in type.ExplicitlyImplementedInterfaces) { if (iface == interfaceType) return true; } return false; } /// <summary> /// Find matching a matching method by name and sig on a type. (Restricted to virtual methods only) /// </summary> /// <param name="targetMethod"></param> /// <param name="currentType"></param> /// <param name="reverseMethodSearch">Used to control the order of the search. For historical purposes to /// match .NET Framework behavior, this is typically true, but not always. There is no particular rationale /// for the particular orders other than to attempt to be consistent in virtual method override behavior /// betweeen runtimes.</param> /// <param name="nameSigMatchMethodIsValidCandidate"></param> /// <returns></returns> private static MethodDesc FindMatchingVirtualMethodOnTypeByNameAndSig(MethodDesc targetMethod, DefType currentType, bool reverseMethodSearch, Func<MethodDesc, MethodDesc, bool> nameSigMatchMethodIsValidCandidate) { string name = targetMethod.Name; MethodSignature sig = targetMethod.Signature; MethodDesc implMethod = null; foreach (MethodDesc candidate in currentType.GetAllVirtualMethods()) { if (candidate.Name == name) { if (candidate.Signature.Equals(sig)) { if (nameSigMatchMethodIsValidCandidate == null || nameSigMatchMethodIsValidCandidate(targetMethod, candidate)) { implMethod = candidate; // If reverseMethodSearch is enabled, we want to find the last match on this type, not the first // (reverseMethodSearch is used for most matches except for searches for name/sig method matches for interface methods on the most derived type) if (!reverseMethodSearch) return implMethod; } } } } return implMethod; } // This function is used to find the name/sig based override for a given method. 
This method ignores all // method impl's as it assumes they have been resolved. The algorithm is simple. Walk to the base type looking // for overrides by name and signature. If one is found, return it as long as the newslot defining method // for the found method matches that of the target method. private static MethodDesc FindNameSigOverrideForVirtualMethod(MethodDesc targetMethod, MetadataType currentType) { while (currentType != null) { MethodDesc nameSigOverride = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(targetMethod, currentType, reverseMethodSearch:true); if (nameSigOverride != null) { return nameSigOverride; } currentType = currentType.MetadataBaseType; } return null; } // This function looks for the base type method that defines the slot for a method // This is either the newslot method most derived that is in the parent hierarchy of method // or the least derived method that isn't newslot that matches by name and sig. public static MethodDesc FindSlotDefiningMethodForVirtualMethod(MethodDesc method) { if (method == null) return method; DefType currentType = method.OwningType.BaseType; // Loop until a newslot method is found while ((currentType != null) && !method.IsNewSlot) { MethodDesc foundMethod = FindMatchingVirtualMethodOnTypeByNameAndSig(method, currentType, reverseMethodSearch: true, nameSigMatchMethodIsValidCandidate:null); if (foundMethod != null) { method = foundMethod; } currentType = currentType.BaseType; } // Newslot method found, or if not the least derived method that matches by name and // sig is to be returned. return method; } /// <summary> /// Find a matching method by name and sig on a type. (Restricted to virtual methods only) Only search amongst methods with the same vtable slot. /// </summary> /// <param name="method"></param> /// <param name="currentType"></param> /// <param name="reverseMethodSearch">Used to control the order of the search. For historical purposes to /// match .NET Framework behavior, this is typically true, but not always. 
There is no particular rationale /// for the particular orders other than to attempt to be consistent in virtual method override behavior /// between runtimes.</param> /// <returns></returns> private static MethodDesc FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(MethodDesc method, DefType currentType, bool reverseMethodSearch) { return FindMatchingVirtualMethodOnTypeByNameAndSig(method, currentType, reverseMethodSearch, nameSigMatchMethodIsValidCandidate: s_VerifyMethodsHaveTheSameVirtualSlot); } private static Func<MethodDesc, MethodDesc, bool> s_VerifyMethodsHaveTheSameVirtualSlot = VerifyMethodsHaveTheSameVirtualSlot; // Return true if the slot that defines methodToVerify matches slotDefiningMethod private static bool VerifyMethodsHaveTheSameVirtualSlot(MethodDesc slotDefiningMethod, MethodDesc methodToVerify) { MethodDesc slotDefiningMethodOfMethodToVerify = FindSlotDefiningMethodForVirtualMethod(methodToVerify); return slotDefiningMethodOfMethodToVerify == slotDefiningMethod; } private static void FindBaseUnificationGroup(MetadataType currentType, UnificationGroup unificationGroup) { MethodDesc originalDefiningMethod = unificationGroup.DefiningMethod; MethodDesc methodImpl = FindImplFromDeclFromMethodImpls(currentType, unificationGroup.DefiningMethod); if (methodImpl != null) { if (methodImpl.RequiresSlotUnification()) { unificationGroup.AddMethodRequiringSlotUnification(unificationGroup.DefiningMethod); unificationGroup.AddMethodRequiringSlotUnification(methodImpl); } unificationGroup.SetDefiningMethod(methodImpl); } MethodDesc nameSigMatchMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(unificationGroup.DefiningMethod, currentType, reverseMethodSearch: true); MetadataType baseType = currentType.MetadataBaseType; // Unless the current type has a name/sig match for the group, look to the base type to define the unification group further if ((nameSigMatchMethod == null) && (baseType != null)) { FindBaseUnificationGroup(baseType, unificationGroup); } Debug.Assert(unificationGroup.IsInGroupOrIsDefiningSlot(originalDefiningMethod)); // Now, we have the unification group from the type, or have discovered it's defined on the current type. 
// Adjust the group to contain all of the elements that are added to it on this type, remove the components that // have separated themselves from the group // Start with removing methods that separated themselves from the group via name/sig matches MethodDescHashtable separatedMethods = null; foreach (MethodDesc memberMethod in unificationGroup.Members) { // If a method is both overriden via MethodImpl and name/sig, we don't remove it from the unification list // as the local MethodImpl takes priority over the name/sig match, and prevents the slot disunificaiton if (FindSlotDefiningMethodForVirtualMethod(memberMethod) == FindSlotDefiningMethodForVirtualMethod(originalDefiningMethod)) continue; MethodDesc nameSigMatchMemberMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(memberMethod, currentType, reverseMethodSearch: true); if (nameSigMatchMemberMethod != null && nameSigMatchMemberMethod != memberMethod) { if (separatedMethods == null) separatedMethods = new MethodDescHashtable(); separatedMethods.AddOrGetExisting(memberMethod); } } if (separatedMethods != null) { foreach (MethodDesc separatedMethod in MethodDescHashtable.Enumerator.Get(separatedMethods)) { unificationGroup.RemoveFromGroup(separatedMethod); } } // Next find members which have separated or added themselves to the group via MethodImpls foreach (MethodImplRecord methodImplRecord in currentType.VirtualMethodImplsForType) { MethodDesc declSlot = FindSlotDefiningMethodForVirtualMethod(methodImplRecord.Decl); MethodDesc implSlot = FindSlotDefiningMethodForVirtualMethod(methodImplRecord.Body); if (unificationGroup.IsInGroup(declSlot) && !unificationGroup.IsInGroupOrIsDefiningSlot(implSlot)) { unificationGroup.RemoveFromGroup(declSlot); if (separatedMethods == null) separatedMethods = new MethodDescHashtable(); separatedMethods.AddOrGetExisting(declSlot); if (unificationGroup.RequiresSlotUnification(declSlot) || implSlot.RequiresSlotUnification()) { if (implSlot.Signature.EqualsWithCovariantReturnType(unificationGroup.DefiningMethod.Signature)) { unificationGroup.AddMethodRequiringSlotUnification(declSlot); unificationGroup.AddMethodRequiringSlotUnification(implSlot); unificationGroup.SetDefiningMethod(implSlot); } } continue; } if (!unificationGroup.IsInGroupOrIsDefiningSlot(declSlot)) { if (unificationGroup.IsInGroupOrIsDefiningSlot(implSlot)) { // Add decl to group. // To do so, we need to have the Unification Group of the decl slot, as it may have multiple members itself UnificationGroup addDeclGroup = new UnificationGroup(declSlot); FindBaseUnificationGroup(baseType, addDeclGroup); Debug.Assert( addDeclGroup.IsInGroupOrIsDefiningSlot(declSlot) || (addDeclGroup.RequiresSlotUnification(declSlot) && addDeclGroup.DefiningMethod.Signature.EqualsWithCovariantReturnType(declSlot.Signature))); foreach (MethodDesc methodImplRequiredToRemainInEffect in addDeclGroup.MethodsRequiringSlotUnification) { unificationGroup.AddMethodRequiringSlotUnification(methodImplRequiredToRemainInEffect); } // Add all members from the decl's unification group except for ones that have been seperated by name/sig matches // or previously processed methodimpls. NOTE: This implies that method impls are order dependent. 
if (separatedMethods == null || !separatedMethods.Contains(addDeclGroup.DefiningMethod)) { unificationGroup.AddToGroup(addDeclGroup.DefiningMethod); } foreach (MethodDesc addDeclGroupMemberMethod in addDeclGroup.Members) { if (separatedMethods == null || !separatedMethods.Contains(addDeclGroupMemberMethod)) { unificationGroup.AddToGroup(addDeclGroupMemberMethod); } } if (unificationGroup.RequiresSlotUnification(declSlot)) { unificationGroup.AddMethodRequiringSlotUnification(implSlot); } else if (implSlot == unificationGroup.DefiningMethod && implSlot.RequiresSlotUnification()) { unificationGroup.AddMethodRequiringSlotUnification(declSlot); unificationGroup.AddMethodRequiringSlotUnification(implSlot); } } else if (unificationGroup.RequiresSlotUnification(declSlot)) { if (implSlot.Signature.EqualsWithCovariantReturnType(unificationGroup.DefiningMethod.Signature)) { unificationGroup.AddMethodRequiringSlotUnification(implSlot); unificationGroup.SetDefiningMethod(implSlot); } } } } } public override MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } public override MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } public override MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } public override MethodDesc ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType) { return ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, (MetadataType)currentType); } //////////////////////// INTERFACE RESOLUTION //Interface function resolution // Interface function resolution follows the following rules // 1. Apply any method impl that may exist, if once of these exists, resolve to target immediately. // 2. If an interface is explicitly defined on a type, then attempt to perform a namesig match on the // current type to resolve.If the interface isn't resolved, if it isn't implemented on a base type, // scan all base types for name / sig matches. // 3. If implicitly defined, attempt to perform a namesig match if the interface method implementation // has not been found on some base type. // The above will resolve an interface to a virtual method slot. From there perform virtual resolution // to find out the actual target.Note, to preserve correct behavior in the presence of variance, this // function returns null if the interface method implementation is not defined by the current type in // the hierarchy.For variance to work correctly, this requires that interfaces be queried in correct order. // See current interface call resolution for details on how that happens. private static MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { Debug.Assert(!interfaceMethod.Signature.IsStatic); if (currentType.IsInterface) return null; MethodDesc methodImpl = FindImplFromDeclFromMethodImpls(currentType, interfaceMethod); if (methodImpl != null) return methodImpl; MetadataType interfaceType = (MetadataType)interfaceMethod.OwningType; // If interface is explicitly defined on a type, search for a name/sig match. 
bool foundExplicitInterface = IsInterfaceExplicitlyImplementedOnType(currentType, interfaceType); MetadataType baseType = currentType.MetadataBaseType; if (foundExplicitInterface) { MethodDesc foundOnCurrentType = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: false, /* When searching for name/sig overrides on a type that explicitly defines an interface, search through the type in the forward direction*/ nameSigMatchMethodIsValidCandidate :null); foundOnCurrentType = FindSlotDefiningMethodForVirtualMethod(foundOnCurrentType); if (baseType == null) return foundOnCurrentType; if (foundOnCurrentType == null && (ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, baseType) == null)) { // TODO! Does this handle the case where the base type explicitly implements the interface, but is abstract // and doesn't actually have an implementation? if (!IsInterfaceImplementedOnType(baseType, interfaceType)) { return FindNameSigOverrideForInterfaceMethodRecursive(interfaceMethod, baseType); } } return foundOnCurrentType; } else { // Implicit interface case if (!IsInterfaceImplementedOnType(currentType, interfaceType)) { // If the interface isn't implemented on this type at all, don't go searching return null; } // This is an implicitly implemented interface method. Only return a vlaue if this is the first type in the class // hierarchy that implements the interface. NOTE: If we pay attention to whether or not the parent type is // abstract or not, we may be able to be more efficient here, but let's skip that for now MethodDesc baseClassImplementationOfInterfaceMethod = ResolveInterfaceMethodToVirtualMethodOnTypeRecursive(interfaceMethod, baseType); if (baseClassImplementationOfInterfaceMethod != null) { return null; } else { MethodDesc foundOnCurrentType = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: false, /* When searching for name/sig overrides on a type that is the first type in the hierarchy to require the interface, search through the type in the forward direction*/ nameSigMatchMethodIsValidCandidate: null); foundOnCurrentType = FindSlotDefiningMethodForVirtualMethod(foundOnCurrentType); if (foundOnCurrentType != null) return foundOnCurrentType; return FindNameSigOverrideForInterfaceMethodRecursive(interfaceMethod, baseType); } } } public static MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { Debug.Assert(!interfaceMethod.Signature.IsStatic); MetadataType interfaceType = (MetadataType)interfaceMethod.OwningType; bool foundInterface = IsInterfaceImplementedOnType(currentType, interfaceType); MethodDesc implMethod; if (foundInterface) { implMethod = ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, currentType); if (implMethod != null) return implMethod; } foreach (TypeDesc iface in currentType.RuntimeInterfaces) { if (iface.HasSameTypeDefinition(interfaceType) && iface.CanCastTo(interfaceType)) { implMethod = iface.FindMethodOnTypeWithMatchingTypicalMethod(interfaceMethod); Debug.Assert(implMethod != null); implMethod = ResolveInterfaceMethodToVirtualMethodOnType(implMethod, currentType); if (implMethod != null) return implMethod; } } return null; } // Helper routine used during implicit interface implementation discovery private static MethodDesc ResolveInterfaceMethodToVirtualMethodOnTypeRecursive(MethodDesc interfaceMethod, MetadataType currentType) { while (true) { if (currentType == null) return null; 
MetadataType interfaceType = (MetadataType)interfaceMethod.OwningType; if (!IsInterfaceImplementedOnType(currentType, interfaceType)) { // If the interface isn't implemented on this type at all, don't go searching return null; } MethodDesc currentTypeInterfaceResolution = ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, currentType); if (currentTypeInterfaceResolution != null) return currentTypeInterfaceResolution; currentType = currentType.MetadataBaseType; } } // Perform a name/sig match for a virtual method across the specified types and all of the types parents. private static MethodDesc FindNameSigOverrideForInterfaceMethodRecursive(MethodDesc interfaceMethod, MetadataType currentType) { while (true) { if (currentType == null) return null; MethodDesc nameSigOverride = FindMatchingVirtualMethodOnTypeByNameAndSig(interfaceMethod, currentType, reverseMethodSearch: true, /* When searching for a name sig match for an interface on parent types search in reverse order of declaration */ nameSigMatchMethodIsValidCandidate:null); if (nameSigOverride != null) { return FindSlotDefiningMethodForVirtualMethod(nameSigOverride); } currentType = currentType.MetadataBaseType; } } public override DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, TypeDesc currentType, out MethodDesc impl) { return ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethod, (MetadataType)currentType, out impl); } private static DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, MetadataType currentType, out MethodDesc impl) { TypeDesc interfaceMethodOwningType = interfaceMethod.OwningType; MetadataType mostSpecificInterface = null; bool diamondCase = false; impl = null; DefType[] consideredInterfaces; if (!currentType.IsInterface) { // If this is not an interface, only things on the interface list could provide // default implementations. consideredInterfaces = currentType.RuntimeInterfaces; } else { // If we're asking about an interface, include the interface in the list. consideredInterfaces = new DefType[currentType.RuntimeInterfaces.Length + 1]; Array.Copy(currentType.RuntimeInterfaces, consideredInterfaces, currentType.RuntimeInterfaces.Length); consideredInterfaces[consideredInterfaces.Length - 1] = (DefType)currentType.InstantiateAsOpen(); } foreach (MetadataType runtimeInterface in consideredInterfaces) { if (runtimeInterface == interfaceMethodOwningType) { // Also consider the default interface method implementation on the interface itself // if we don't have anything else yet if (mostSpecificInterface == null && !interfaceMethod.IsAbstract) { mostSpecificInterface = runtimeInterface; impl = interfaceMethod; } } else if (Array.IndexOf(runtimeInterface.RuntimeInterfaces, interfaceMethodOwningType) != -1) { // This interface might provide a default implementation MethodImplRecord[] possibleImpls = runtimeInterface.FindMethodsImplWithMatchingDeclName(interfaceMethod.Name); if (possibleImpls != null) { foreach (MethodImplRecord implRecord in possibleImpls) { if (implRecord.Decl == interfaceMethod) { // This interface provides a default implementation. // Is it also most specific? 
if (mostSpecificInterface == null || Array.IndexOf(runtimeInterface.RuntimeInterfaces, mostSpecificInterface) != -1) { mostSpecificInterface = runtimeInterface; impl = implRecord.Body; diamondCase = false; } else if (Array.IndexOf(mostSpecificInterface.RuntimeInterfaces, runtimeInterface) == -1) { diamondCase = true; } break; } } } } } if (diamondCase) { impl = null; return DefaultInterfaceMethodResolution.Diamond; } else if (impl == null) { return DefaultInterfaceMethodResolution.None; } else if (impl.IsAbstract) { return DefaultInterfaceMethodResolution.Reabstraction; } return DefaultInterfaceMethodResolution.DefaultImplementation; } public override IEnumerable<MethodDesc> ComputeAllVirtualSlots(TypeDesc type) { return EnumAllVirtualSlots((MetadataType)type); } // Enumerate all possible virtual slots of a type public static IEnumerable<MethodDesc> EnumAllVirtualSlots(MetadataType type) { MethodDescHashtable alreadyEnumerated = new MethodDescHashtable(); if (!type.IsInterface) { do { foreach (MethodDesc m in type.GetAllVirtualMethods()) { MethodDesc possibleVirtual = FindSlotDefiningMethodForVirtualMethod(m); if (!alreadyEnumerated.Contains(possibleVirtual)) { alreadyEnumerated.AddOrGetExisting(possibleVirtual); yield return possibleVirtual; } } type = type.MetadataBaseType; } while (type != null); } } /// <summary> /// Try to resolve a given virtual static interface method on a given constrained type and its base types. /// </summary> /// <param name="interfaceMethod">Interface method to resolve</param> /// <param name="currentType">Type to attempt virtual static method resolution on</param> /// <returns>MethodDesc of the resolved virtual static method, null when not found (runtime lookup must be used)</returns> public static MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { TypeDesc interfaceType = interfaceMethod.OwningType; // Search for match on a per-level in the type hierarchy for (MetadataType typeToCheck = currentType; typeToCheck != null; typeToCheck = typeToCheck.MetadataBaseType) { MethodDesc resolvedMethodOnType = TryResolveVirtualStaticMethodOnThisType(typeToCheck, interfaceMethod); if (resolvedMethodOnType != null) { return resolvedMethodOnType; } } return null; } /// <summary> /// Try to resolve a given virtual static interface method on a given constrained type and its base types. 
/// </summary> /// <param name="interfaceMethod">Interface method to resolve</param> /// <param name="currentType">Type to attempt virtual static method resolution on</param> /// <returns>MethodDesc of the resolved virtual static method, null when not found (runtime lookup must be used)</returns> public static MethodDesc ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType) { TypeDesc interfaceType = interfaceMethod.OwningType; // Search for match on a per-level in the type hierarchy for (MetadataType typeToCheck = currentType; typeToCheck != null; typeToCheck = typeToCheck.MetadataBaseType) { MethodDesc resolvedMethodOnType = TryResolveVirtualStaticMethodOnThisType(typeToCheck, interfaceMethod); if (resolvedMethodOnType != null) { return resolvedMethodOnType; } // Variant interface dispatch foreach (DefType runtimeInterfaceType in typeToCheck.RuntimeInterfaces) { if (runtimeInterfaceType == interfaceType) { // This is the variant interface check logic, skip this continue; } if (!runtimeInterfaceType.HasSameTypeDefinition(interfaceType)) { // Variance matches require a typedef match // Equivalence isn't sufficient, and is uninteresting as equivalent interfaces cannot have static virtuals. continue; } if (runtimeInterfaceType.CanCastTo(interfaceType)) { // Attempt to resolve on variance matched interface MethodDesc runtimeInterfaceMethod = runtimeInterfaceType.FindMethodOnExactTypeWithMatchingTypicalMethod(interfaceMethod); resolvedMethodOnType = TryResolveVirtualStaticMethodOnThisType(typeToCheck, runtimeInterfaceMethod); if (resolvedMethodOnType != null) { return resolvedMethodOnType; } } } } return null; } /// <summary> /// Try to resolve a given virtual static interface method on a given constrained type and return the resolved method or null when not found. /// </summary> /// <param name="constrainedType">Type to attempt method resolution on</param> /// <param name="interfaceMethod">Method to resolve</param> /// <returns>MethodDesc of the resolved method or null when not found (runtime lookup must be used)</returns> private static MethodDesc TryResolveVirtualStaticMethodOnThisType(MetadataType constrainedType, MethodDesc interfaceMethod) { Debug.Assert(interfaceMethod.Signature.IsStatic); MethodImplRecord[] possibleImpls = constrainedType.FindMethodsImplWithMatchingDeclName(interfaceMethod.Name); if (possibleImpls == null) return null; MethodDesc interfaceMethodDefinition = interfaceMethod.GetMethodDefinition(); foreach (MethodImplRecord methodImpl in possibleImpls) { if (methodImpl.Decl == interfaceMethodDefinition) { MethodDesc resolvedMethodImpl = methodImpl.Body; if (interfaceMethod != interfaceMethodDefinition) { resolvedMethodImpl = resolvedMethodImpl.MakeInstantiatedMethod(interfaceMethod.Instantiation); } return resolvedMethodImpl; } } return null; } } }
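To tie the new TryResolveVirtualStaticMethodOnThisType path back to source form, here is a hedged sketch (IAdditive, Meters, and Demo are invented names). An explicit implementation of a static virtual is recorded as a MethodImpl (Decl, Body) pair in metadata, and those pairs are the only thing this resolver matches on; note there is no name/sig fallback as in the instance paths above.

using System;

public interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
{
    static abstract TSelf Zero { get; }
}

public struct Meters : IAdditive<Meters>
{
    public double Value;

    // Explicit implementation of a static virtual. In metadata this becomes a
    // MethodImpl whose Decl is IAdditive<Meters>.get_Zero and whose Body is
    // this accessor -- the (Decl, Body) pair that
    // TryResolveVirtualStaticMethodOnThisType scans for.
    static Meters IAdditive<Meters>.Zero => new Meters { Value = 0 };
}

public static class Demo
{
    public static T ZeroOf<T>() where T : IAdditive<T> => T.Zero;

    public static void Main() => Console.WriteLine(ZeroOf<Meters>().Value); // 0
}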
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
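For readers unfamiliar with the feature this PR wires up, a minimal sketch of what a static virtual (static abstract) interface member looks like from the C# side — illustrative only, not code from the PR; the `IAdditive`/`Meters`/`Accumulator` names are hypothetical:

```csharp
using System.Collections.Generic;

// A generic interface with static abstract members (C# 11 / .NET 7 feature).
interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
{
    static abstract TSelf Zero { get; }
    static abstract TSelf Add(TSelf left, TSelf right);
}

struct Meters : IAdditive<Meters>
{
    public double Value;
    public static Meters Zero => default;
    public static Meters Add(Meters left, Meters right)
        => new Meters { Value = left.Value + right.Value };
}

static class Accumulator
{
    // Each use of T.Zero / T.Add below is a *constrained static call*: the
    // compiler must resolve the interface method to the implementation on the
    // constrained type T — the resolution this PR hooks up for AOT via
    // JitInterface/ResolveConstraintMethodApprox.
    public static T Sum<T>(IEnumerable<T> items) where T : IAdditive<T>
    {
        T acc = T.Zero;
        foreach (T item in items)
            acc = T.Add(acc, item);
        return acc;
    }
}
```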
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/Common/TypeSystem/Common/TypeSystemHelpers.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; namespace Internal.TypeSystem { public static class TypeSystemHelpers { public static bool IsWellKnownType(this TypeDesc type, WellKnownType wellKnownType) { return type == type.Context.GetWellKnownType(wellKnownType, false); } public static InstantiatedType MakeInstantiatedType(this MetadataType typeDef, Instantiation instantiation) { return typeDef.Context.GetInstantiatedType(typeDef, instantiation); } public static InstantiatedType MakeInstantiatedType(this MetadataType typeDef, params TypeDesc[] genericParameters) { return typeDef.Context.GetInstantiatedType(typeDef, new Instantiation(genericParameters)); } public static InstantiatedMethod MakeInstantiatedMethod(this MethodDesc methodDef, Instantiation instantiation) { return methodDef.Context.GetInstantiatedMethod(methodDef, instantiation); } public static InstantiatedMethod MakeInstantiatedMethod(this MethodDesc methodDef, params TypeDesc[] genericParameters) { return methodDef.Context.GetInstantiatedMethod(methodDef, new Instantiation(genericParameters)); } public static ArrayType MakeArrayType(this TypeDesc type) { return type.Context.GetArrayType(type); } /// <summary> /// Creates a multidimensional array type with the specified rank. /// To create a vector, use the <see cref="MakeArrayType(TypeDesc)"/> overload. /// </summary> public static ArrayType MakeArrayType(this TypeDesc type, int rank) { return type.Context.GetArrayType(type, rank); } public static ByRefType MakeByRefType(this TypeDesc type) { return type.Context.GetByRefType(type); } public static PointerType MakePointerType(this TypeDesc type) { return type.Context.GetPointerType(type); } public static TypeDesc GetParameterType(this TypeDesc type) { ParameterizedType paramType = (ParameterizedType)type; return paramType.ParameterType; } public static bool HasLayout(this MetadataType mdType) { return mdType.IsSequentialLayout || mdType.IsExplicitLayout; } public static LayoutInt GetElementSize(this TypeDesc type) { if (type.IsValueType) { return ((DefType)type).InstanceFieldSize; } else { return type.Context.Target.LayoutPointerSize; } } /// <summary> /// Gets the parameterless instance constructor on the specified type. To get the default constructor, use <see cref="TypeDesc.GetDefaultConstructor"/>. /// </summary> public static MethodDesc GetParameterlessConstructor(this TypeDesc type) { // TODO: Do we want check for specialname/rtspecialname? Maybe add another overload on GetMethod? 
var sig = new MethodSignature(0, 0, type.Context.GetWellKnownType(WellKnownType.Void), TypeDesc.EmptyTypes); return type.GetMethod(".ctor", sig); } public static bool HasExplicitOrImplicitDefaultConstructor(this TypeDesc type) { return type.IsValueType || type.GetDefaultConstructor() != null; } internal static MethodDesc FindMethodOnExactTypeWithMatchingTypicalMethod(this TypeDesc type, MethodDesc method) { MethodDesc methodTypicalDefinition = method.GetTypicalMethodDefinition(); var instantiatedType = type as InstantiatedType; if (instantiatedType != null) { Debug.Assert(instantiatedType.GetTypeDefinition() == methodTypicalDefinition.OwningType); return method.Context.GetMethodForInstantiatedType(methodTypicalDefinition, instantiatedType); } else if (type.IsArray) { Debug.Assert(method.OwningType.IsArray); return ((ArrayType)type).GetArrayMethod(((ArrayMethod)method).Kind); } else { Debug.Assert(type == methodTypicalDefinition.OwningType); return methodTypicalDefinition; } } /// <summary> /// Returns method as defined on a non-generic base class or on a base /// instantiation. /// For example, If Foo&lt;T&gt; : Bar&lt;T&gt; and overrides method M, /// if method is Bar&lt;string&gt;.M(), then this returns Bar&lt;T&gt;.M() /// but if Foo : Bar&lt;string&gt;, then this returns Bar&lt;string&gt;.M() /// </summary> /// <param name="targetType">A potentially derived type</param> /// <param name="method">A base class's virtual method</param> public static MethodDesc FindMethodOnTypeWithMatchingTypicalMethod(this TypeDesc targetType, MethodDesc method) { // If method is nongeneric and on a nongeneric type, then it is the matching method if (!method.HasInstantiation && !method.OwningType.HasInstantiation) { return method; } // Since method is an instantiation that may or may not be the same as typeExamine's hierarchy, // find a matching base class on an open type and then work from the instantiation in typeExamine's // hierarchy TypeDesc typicalTypeOfTargetMethod = method.GetTypicalMethodDefinition().OwningType; TypeDesc targetOrBase = targetType; do { TypeDesc openTargetOrBase = targetOrBase; if (openTargetOrBase is InstantiatedType) { openTargetOrBase = openTargetOrBase.GetTypeDefinition(); } if (openTargetOrBase == typicalTypeOfTargetMethod) { // Found an open match. Now find an equivalent method on the original target typeOrBase MethodDesc matchingMethod = targetOrBase.FindMethodOnExactTypeWithMatchingTypicalMethod(method); return matchingMethod; } targetOrBase = targetOrBase.BaseType; } while (targetOrBase != null); Debug.Fail("method has no related type in the type hierarchy of type"); return null; } /// <summary> /// Retrieves the namespace qualified name of a <see cref="DefType"/>. /// </summary> public static string GetFullName(this DefType metadataType) { string ns = metadataType.Namespace; return ns.Length > 0 ? string.Concat(ns, ".", metadataType.Name) : metadataType.Name; } /// <summary> /// Retrieves all methods on a type, including the ones injected by the type system context. /// </summary> public static IEnumerable<MethodDesc> GetAllMethods(this TypeDesc type) { return type.Context.GetAllMethods(type); } /// <summary> /// Retrieves all virtual methods on a type, including the ones injected by the type system context. 
/// </summary> public static IEnumerable<MethodDesc> GetAllVirtualMethods(this TypeDesc type) { return type.Context.GetAllVirtualMethods(type); } public static IEnumerable<MethodDesc> EnumAllVirtualSlots(this TypeDesc type) { return type.Context.GetVirtualMethodAlgorithmForType(type).ComputeAllVirtualSlots(type); } /// <summary> /// Resolves interface method '<paramref name="interfaceMethod"/>' to a method on '<paramref name="type"/>' /// that implements the the method. /// </summary> public static MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, type); } public static MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethod, type); } public static DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(this TypeDesc type, MethodDesc interfaceMethod, out MethodDesc implMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethod, type, out implMethod); } /// <summary> /// Resolves a virtual method call. /// </summary> public static MethodDesc FindVirtualFunctionTargetMethodOnObjectType(this TypeDesc type, MethodDesc targetMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).FindVirtualFunctionTargetMethodOnObjectType(targetMethod, type); } /// <summary> /// Creates an open instantiation of a type. Given Foo&lt;T&gt;, returns Foo&lt;!0&gt;. /// If the type is not generic, returns the <paramref name="type"/>. /// </summary> public static TypeDesc InstantiateAsOpen(this TypeDesc type) { if (!type.IsGenericDefinition) { Debug.Assert(!type.HasInstantiation); return type; } TypeSystemContext context = type.Context; var inst = new TypeDesc[type.Instantiation.Length]; for (int i = 0; i < inst.Length; i++) { inst[i] = context.GetSignatureVariable(i, false); } return context.GetInstantiatedType((MetadataType)type, new Instantiation(inst)); } /// <summary> /// Creates an open instantiation of a field. Given Foo&lt;T&gt;.Field, returns /// Foo&lt;!0&gt;.Field. If the owning type is not generic, returns the <paramref name="field"/>. /// </summary> public static FieldDesc InstantiateAsOpen(this FieldDesc field) { Debug.Assert(field.GetTypicalFieldDefinition() == field); TypeDesc owner = field.OwningType; if (owner.HasInstantiation) { var instantiatedOwner = (InstantiatedType)owner.InstantiateAsOpen(); return field.Context.GetFieldForInstantiatedType(field, instantiatedOwner); } return field; } /// <summary> /// Creates an open instantiation of a method. Given Foo&lt;T&gt;.Method, returns /// Foo&lt;!0&gt;.Method. If the owning type is not generic, returns the <paramref name="method"/>. /// </summary> public static MethodDesc InstantiateAsOpen(this MethodDesc method) { Debug.Assert(method.IsMethodDefinition && !method.HasInstantiation); TypeDesc owner = method.OwningType; if (owner.HasInstantiation) { MetadataType instantiatedOwner = (MetadataType)owner.InstantiateAsOpen(); return method.Context.GetMethodForInstantiatedType(method, (InstantiatedType)instantiatedOwner); } return method; } /// <summary> /// Scan the type and its base types for an implementation of an interface method. Returns null if no /// implementation is found. 
/// </summary> public static MethodDesc ResolveInterfaceMethodTarget(this TypeDesc thisType, MethodDesc interfaceMethodToResolve) { Debug.Assert(interfaceMethodToResolve.OwningType.IsInterface); MethodDesc result; TypeDesc currentType = thisType; do { result = currentType.ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethodToResolve); currentType = currentType.BaseType; } while (result == null && currentType != null); return result; } /// <summary> /// Scan the type and its base types for an implementation of an interface method. Returns null if no /// implementation is found. /// </summary> public static MethodDesc ResolveInterfaceMethodTargetWithVariance(this TypeDesc thisType, MethodDesc interfaceMethodToResolve) { Debug.Assert(interfaceMethodToResolve.OwningType.IsInterface); MethodDesc result; TypeDesc currentType = thisType; do { result = currentType.ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethodToResolve); currentType = currentType.BaseType; } while (result == null && currentType != null); return result; } public static bool ContainsSignatureVariables(this TypeDesc thisType, bool treatGenericParameterLikeSignatureVariable = false) { switch (thisType.Category) { case TypeFlags.Array: case TypeFlags.SzArray: case TypeFlags.ByRef: case TypeFlags.Pointer: return ((ParameterizedType)thisType).ParameterType.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable); case TypeFlags.FunctionPointer: MethodSignature pointerSignature = ((FunctionPointerType)thisType).Signature; for (int i = 0; i < pointerSignature.Length; i++) if (pointerSignature[i].ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; return pointerSignature.ReturnType.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable); case TypeFlags.SignatureMethodVariable: case TypeFlags.SignatureTypeVariable: return true; case TypeFlags.GenericParameter: if (treatGenericParameterLikeSignatureVariable) return true; // It is generally a bug to have instantiations over generic parameters // in the system. Typical instantiations are represented as instantiations // over own formals - so these should be signature variables instead. throw new ArgumentException(); default: Debug.Assert(thisType is DefType); foreach (TypeDesc arg in thisType.Instantiation) { if (arg.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; } return false; } } /// <summary> /// Check if MethodImpl requires slot unification. /// </summary> /// <param name="method">Method to check</param> /// <returns>True when the method is marked with the PreserveBaseOverrides custom attribute, false otherwise.</returns> public static bool RequiresSlotUnification(this MethodDesc method) { if (method.HasCustomAttribute("System.Runtime.CompilerServices", "PreserveBaseOverridesAttribute")) { #if DEBUG // We shouldn't be calling this for non-MethodImpls, so verify that the method being checked is really a MethodImpl MetadataType mdType = method.OwningType as MetadataType; if (mdType != null) { bool isMethodImpl = false; foreach (MethodImplRecord methodImplRecord in mdType.VirtualMethodImplsForType) { if (method == methodImplRecord.Body) { isMethodImpl = true; break; } } Debug.Assert(isMethodImpl); } #endif return true; } return false; } /// <summary> /// Determines whether an object of type '<paramref name="type"/>' requires 8-byte alignment on /// 32bit ARM or 32bit Wasm architectures. 
/// </summary> public static bool RequiresAlign8(this TypeDesc type) { if (type.Context.Target.Architecture != TargetArchitecture.ARM && type.Context.Target.Architecture != TargetArchitecture.Wasm32) { return false; } if (type.IsArray) { var elementType = ((ArrayType)type).ElementType; if (elementType.IsValueType) { var alignment = ((DefType)elementType).InstanceByteAlignment; if (!alignment.IsIndeterminate && alignment.AsInt > 4) { return true; } } } else if (type.IsDefType) { var alignment = ((DefType)type).InstanceByteAlignment; if (!alignment.IsIndeterminate && alignment.AsInt > 4) { return true; } } return false; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; namespace Internal.TypeSystem { public static class TypeSystemHelpers { public static bool IsWellKnownType(this TypeDesc type, WellKnownType wellKnownType) { return type == type.Context.GetWellKnownType(wellKnownType, false); } public static InstantiatedType MakeInstantiatedType(this MetadataType typeDef, Instantiation instantiation) { return typeDef.Context.GetInstantiatedType(typeDef, instantiation); } public static InstantiatedType MakeInstantiatedType(this MetadataType typeDef, params TypeDesc[] genericParameters) { return typeDef.Context.GetInstantiatedType(typeDef, new Instantiation(genericParameters)); } public static InstantiatedMethod MakeInstantiatedMethod(this MethodDesc methodDef, Instantiation instantiation) { return methodDef.Context.GetInstantiatedMethod(methodDef, instantiation); } public static InstantiatedMethod MakeInstantiatedMethod(this MethodDesc methodDef, params TypeDesc[] genericParameters) { return methodDef.Context.GetInstantiatedMethod(methodDef, new Instantiation(genericParameters)); } public static ArrayType MakeArrayType(this TypeDesc type) { return type.Context.GetArrayType(type); } /// <summary> /// Creates a multidimensional array type with the specified rank. /// To create a vector, use the <see cref="MakeArrayType(TypeDesc)"/> overload. /// </summary> public static ArrayType MakeArrayType(this TypeDesc type, int rank) { return type.Context.GetArrayType(type, rank); } public static ByRefType MakeByRefType(this TypeDesc type) { return type.Context.GetByRefType(type); } public static PointerType MakePointerType(this TypeDesc type) { return type.Context.GetPointerType(type); } public static TypeDesc GetParameterType(this TypeDesc type) { ParameterizedType paramType = (ParameterizedType)type; return paramType.ParameterType; } public static bool HasLayout(this MetadataType mdType) { return mdType.IsSequentialLayout || mdType.IsExplicitLayout; } public static LayoutInt GetElementSize(this TypeDesc type) { if (type.IsValueType) { return ((DefType)type).InstanceFieldSize; } else { return type.Context.Target.LayoutPointerSize; } } /// <summary> /// Gets the parameterless instance constructor on the specified type. To get the default constructor, use <see cref="TypeDesc.GetDefaultConstructor"/>. /// </summary> public static MethodDesc GetParameterlessConstructor(this TypeDesc type) { // TODO: Do we want check for specialname/rtspecialname? Maybe add another overload on GetMethod? 
var sig = new MethodSignature(0, 0, type.Context.GetWellKnownType(WellKnownType.Void), TypeDesc.EmptyTypes); return type.GetMethod(".ctor", sig); } public static bool HasExplicitOrImplicitDefaultConstructor(this TypeDesc type) { return type.IsValueType || type.GetDefaultConstructor() != null; } internal static MethodDesc FindMethodOnExactTypeWithMatchingTypicalMethod(this TypeDesc type, MethodDesc method) { MethodDesc methodTypicalDefinition = method.GetTypicalMethodDefinition(); var instantiatedType = type as InstantiatedType; if (instantiatedType != null) { Debug.Assert(instantiatedType.GetTypeDefinition() == methodTypicalDefinition.OwningType); return method.Context.GetMethodForInstantiatedType(methodTypicalDefinition, instantiatedType); } else if (type.IsArray) { Debug.Assert(method.OwningType.IsArray); return ((ArrayType)type).GetArrayMethod(((ArrayMethod)method).Kind); } else { Debug.Assert(type == methodTypicalDefinition.OwningType); return methodTypicalDefinition; } } /// <summary> /// Returns method as defined on a non-generic base class or on a base /// instantiation. /// For example, If Foo&lt;T&gt; : Bar&lt;T&gt; and overrides method M, /// if method is Bar&lt;string&gt;.M(), then this returns Bar&lt;T&gt;.M() /// but if Foo : Bar&lt;string&gt;, then this returns Bar&lt;string&gt;.M() /// </summary> /// <param name="targetType">A potentially derived type</param> /// <param name="method">A base class's virtual method</param> public static MethodDesc FindMethodOnTypeWithMatchingTypicalMethod(this TypeDesc targetType, MethodDesc method) { // If method is nongeneric and on a nongeneric type, then it is the matching method if (!method.HasInstantiation && !method.OwningType.HasInstantiation) { return method; } // Since method is an instantiation that may or may not be the same as typeExamine's hierarchy, // find a matching base class on an open type and then work from the instantiation in typeExamine's // hierarchy TypeDesc typicalTypeOfTargetMethod = method.GetTypicalMethodDefinition().OwningType; TypeDesc targetOrBase = targetType; do { TypeDesc openTargetOrBase = targetOrBase; if (openTargetOrBase is InstantiatedType) { openTargetOrBase = openTargetOrBase.GetTypeDefinition(); } if (openTargetOrBase == typicalTypeOfTargetMethod) { // Found an open match. Now find an equivalent method on the original target typeOrBase MethodDesc matchingMethod = targetOrBase.FindMethodOnExactTypeWithMatchingTypicalMethod(method); return matchingMethod; } targetOrBase = targetOrBase.BaseType; } while (targetOrBase != null); Debug.Fail("method has no related type in the type hierarchy of type"); return null; } /// <summary> /// Retrieves the namespace qualified name of a <see cref="DefType"/>. /// </summary> public static string GetFullName(this DefType metadataType) { string ns = metadataType.Namespace; return ns.Length > 0 ? string.Concat(ns, ".", metadataType.Name) : metadataType.Name; } /// <summary> /// Retrieves all methods on a type, including the ones injected by the type system context. /// </summary> public static IEnumerable<MethodDesc> GetAllMethods(this TypeDesc type) { return type.Context.GetAllMethods(type); } /// <summary> /// Retrieves all virtual methods on a type, including the ones injected by the type system context. 
/// </summary> public static IEnumerable<MethodDesc> GetAllVirtualMethods(this TypeDesc type) { return type.Context.GetAllVirtualMethods(type); } public static IEnumerable<MethodDesc> EnumAllVirtualSlots(this TypeDesc type) { return type.Context.GetVirtualMethodAlgorithmForType(type).ComputeAllVirtualSlots(type); } /// <summary> /// Resolves interface method '<paramref name="interfaceMethod"/>' to a method on '<paramref name="type"/>' /// that implements the the method. /// </summary> public static MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod, type); } public static MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethod, type); } public static MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, type); } public static MethodDesc ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(this TypeDesc type, MethodDesc interfaceMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, type); } public static DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(this TypeDesc type, MethodDesc interfaceMethod, out MethodDesc implMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethod, type, out implMethod); } /// <summary> /// Resolves a virtual method call. /// </summary> public static MethodDesc FindVirtualFunctionTargetMethodOnObjectType(this TypeDesc type, MethodDesc targetMethod) { return type.Context.GetVirtualMethodAlgorithmForType(type).FindVirtualFunctionTargetMethodOnObjectType(targetMethod, type); } /// <summary> /// Creates an open instantiation of a type. Given Foo&lt;T&gt;, returns Foo&lt;!0&gt;. /// If the type is not generic, returns the <paramref name="type"/>. /// </summary> public static TypeDesc InstantiateAsOpen(this TypeDesc type) { if (!type.IsGenericDefinition) { Debug.Assert(!type.HasInstantiation); return type; } TypeSystemContext context = type.Context; var inst = new TypeDesc[type.Instantiation.Length]; for (int i = 0; i < inst.Length; i++) { inst[i] = context.GetSignatureVariable(i, false); } return context.GetInstantiatedType((MetadataType)type, new Instantiation(inst)); } /// <summary> /// Creates an open instantiation of a field. Given Foo&lt;T&gt;.Field, returns /// Foo&lt;!0&gt;.Field. If the owning type is not generic, returns the <paramref name="field"/>. /// </summary> public static FieldDesc InstantiateAsOpen(this FieldDesc field) { Debug.Assert(field.GetTypicalFieldDefinition() == field); TypeDesc owner = field.OwningType; if (owner.HasInstantiation) { var instantiatedOwner = (InstantiatedType)owner.InstantiateAsOpen(); return field.Context.GetFieldForInstantiatedType(field, instantiatedOwner); } return field; } /// <summary> /// Creates an open instantiation of a method. Given Foo&lt;T&gt;.Method, returns /// Foo&lt;!0&gt;.Method. If the owning type is not generic, returns the <paramref name="method"/>. 
/// </summary> public static MethodDesc InstantiateAsOpen(this MethodDesc method) { Debug.Assert(method.IsMethodDefinition && !method.HasInstantiation); TypeDesc owner = method.OwningType; if (owner.HasInstantiation) { MetadataType instantiatedOwner = (MetadataType)owner.InstantiateAsOpen(); return method.Context.GetMethodForInstantiatedType(method, (InstantiatedType)instantiatedOwner); } return method; } /// <summary> /// Scan the type and its base types for an implementation of an interface method. Returns null if no /// implementation is found. /// </summary> public static MethodDesc ResolveInterfaceMethodTarget(this TypeDesc thisType, MethodDesc interfaceMethodToResolve) { Debug.Assert(interfaceMethodToResolve.OwningType.IsInterface); MethodDesc result; TypeDesc currentType = thisType; do { result = currentType.ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethodToResolve); currentType = currentType.BaseType; } while (result == null && currentType != null); return result; } /// <summary> /// Scan the type and its base types for an implementation of an interface method. Returns null if no /// implementation is found. /// </summary> public static MethodDesc ResolveInterfaceMethodTargetWithVariance(this TypeDesc thisType, MethodDesc interfaceMethodToResolve) { Debug.Assert(interfaceMethodToResolve.OwningType.IsInterface); MethodDesc result; TypeDesc currentType = thisType; do { result = currentType.ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethodToResolve); currentType = currentType.BaseType; } while (result == null && currentType != null); return result; } public static bool ContainsSignatureVariables(this TypeDesc thisType, bool treatGenericParameterLikeSignatureVariable = false) { switch (thisType.Category) { case TypeFlags.Array: case TypeFlags.SzArray: case TypeFlags.ByRef: case TypeFlags.Pointer: return ((ParameterizedType)thisType).ParameterType.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable); case TypeFlags.FunctionPointer: MethodSignature pointerSignature = ((FunctionPointerType)thisType).Signature; for (int i = 0; i < pointerSignature.Length; i++) if (pointerSignature[i].ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; return pointerSignature.ReturnType.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable); case TypeFlags.SignatureMethodVariable: case TypeFlags.SignatureTypeVariable: return true; case TypeFlags.GenericParameter: if (treatGenericParameterLikeSignatureVariable) return true; // It is generally a bug to have instantiations over generic parameters // in the system. Typical instantiations are represented as instantiations // over own formals - so these should be signature variables instead. throw new ArgumentException(); default: Debug.Assert(thisType is DefType); foreach (TypeDesc arg in thisType.Instantiation) { if (arg.ContainsSignatureVariables(treatGenericParameterLikeSignatureVariable)) return true; } return false; } } /// <summary> /// Check if MethodImpl requires slot unification. 
/// </summary> /// <param name="method">Method to check</param> /// <returns>True when the method is marked with the PreserveBaseOverrides custom attribute, false otherwise.</returns> public static bool RequiresSlotUnification(this MethodDesc method) { if (method.HasCustomAttribute("System.Runtime.CompilerServices", "PreserveBaseOverridesAttribute")) { #if DEBUG // We shouldn't be calling this for non-MethodImpls, so verify that the method being checked is really a MethodImpl MetadataType mdType = method.OwningType as MetadataType; if (mdType != null) { bool isMethodImpl = false; foreach (MethodImplRecord methodImplRecord in mdType.VirtualMethodImplsForType) { if (method == methodImplRecord.Body) { isMethodImpl = true; break; } } Debug.Assert(isMethodImpl); } #endif return true; } return false; } /// <summary> /// Determines whether an object of type '<paramref name="type"/>' requires 8-byte alignment on /// 32bit ARM or 32bit Wasm architectures. /// </summary> public static bool RequiresAlign8(this TypeDesc type) { if (type.Context.Target.Architecture != TargetArchitecture.ARM && type.Context.Target.Architecture != TargetArchitecture.Wasm32) { return false; } if (type.IsArray) { var elementType = ((ArrayType)type).ElementType; if (elementType.IsValueType) { var alignment = ((DefType)elementType).InstanceByteAlignment; if (!alignment.IsIndeterminate && alignment.AsInt > 4) { return true; } } } else if (type.IsDefType) { var alignment = ((DefType)type).InstanceByteAlignment; if (!alignment.IsIndeterminate && alignment.AsInt > 4) { return true; } } return false; } } }
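The diff above adds `ResolveInterfaceMethodToStaticVirtualMethodOnType` and `ResolveVariantInterfaceMethodToStaticVirtualMethodOnType` as `TypeDesc` extension methods. Below is a hedged caller-side sketch (not code from the PR; `ResolveConstrainedStaticCall` is a hypothetical helper) of how a compiler component might cascade the exact lookup and the variance-aware fallback:

```csharp
using Internal.TypeSystem;

static class ConstrainedStaticCallResolver
{
    // Hypothetical sketch: resolve a constrained static interface call on a
    // known implementing type, deferring to runtime lookup when unresolved.
    public static MethodDesc ResolveConstrainedStaticCall(TypeDesc constrainedType, MethodDesc interfaceMethod)
    {
        // Exact interface match first (e.g. a struct implementing the
        // interface instantiated over itself).
        MethodDesc impl = constrainedType.ResolveInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod);

        // Fall back to variance-aware matching (an IFoo<Derived>
        // implementation satisfying an IFoo<Base> call site).
        impl ??= constrainedType.ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod);

        // null means the answer must come from a runtime lookup instead.
        return impl;
    }
}
```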
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/Common/TypeSystem/Common/VirtualMethodAlgorithm.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;

namespace Internal.TypeSystem
{
    /// <summary>
    /// Pluggable virtual method computation algorithm. Provides an abstraction to resolve
    /// virtual and interface methods on types.
    /// </summary>
    /// <remarks>
    /// The algorithms are expected to be directly used by <see cref="TypeSystemContext"/> derivatives
    /// only. The most obvious implementation of this algorithm that uses a type's metadata to
    /// compute the answers is in <see cref="MetadataVirtualMethodAlgorithm"/>.
    /// </remarks>
    public abstract class VirtualMethodAlgorithm
    {
        /// <summary>
        /// Resolves interface method '<paramref name="interfaceMethod"/>' to a method on '<paramref name="currentType"/>'
        /// that implements the method.
        /// </summary>
        public abstract MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, TypeDesc currentType, out MethodDesc impl);

        /// <summary>
        /// Resolves a virtual method call.
        /// </summary>
        public abstract MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, TypeDesc objectType);

        /// <summary>
        /// Enumerates all virtual slots on '<paramref name="type"/>'.
        /// </summary>
        public abstract IEnumerable<MethodDesc> ComputeAllVirtualSlots(TypeDesc type);
    }

    public enum DefaultInterfaceMethodResolution
    {
        /// <summary>
        /// No default implementation was found.
        /// </summary>
        None,

        /// <summary>
        /// A default implementation was found.
        /// </summary>
        DefaultImplementation,

        /// <summary>
        /// The implementation was reabstracted.
        /// </summary>
        Reabstraction,

        /// <summary>
        /// The default implementation conflicts.
        /// </summary>
        Diamond,
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;

namespace Internal.TypeSystem
{
    /// <summary>
    /// Pluggable virtual method computation algorithm. Provides an abstraction to resolve
    /// virtual and interface methods on types.
    /// </summary>
    /// <remarks>
    /// The algorithms are expected to be directly used by <see cref="TypeSystemContext"/> derivatives
    /// only. The most obvious implementation of this algorithm that uses a type's metadata to
    /// compute the answers is in <see cref="MetadataVirtualMethodAlgorithm"/>.
    /// </remarks>
    public abstract class VirtualMethodAlgorithm
    {
        /// <summary>
        /// Resolves interface method '<paramref name="interfaceMethod"/>' to a method on '<paramref name="currentType"/>'
        /// that implements the method.
        /// </summary>
        public abstract MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract MethodDesc ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType);

        public abstract DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultImplementationOnType(MethodDesc interfaceMethod, TypeDesc currentType, out MethodDesc impl);

        /// <summary>
        /// Resolves a virtual method call.
        /// </summary>
        public abstract MethodDesc FindVirtualFunctionTargetMethodOnObjectType(MethodDesc targetMethod, TypeDesc objectType);

        /// <summary>
        /// Enumerates all virtual slots on '<paramref name="type"/>'.
        /// </summary>
        public abstract IEnumerable<MethodDesc> ComputeAllVirtualSlots(TypeDesc type);
    }

    public enum DefaultInterfaceMethodResolution
    {
        /// <summary>
        /// No default implementation was found.
        /// </summary>
        None,

        /// <summary>
        /// A default implementation was found.
        /// </summary>
        DefaultImplementation,

        /// <summary>
        /// The implementation was reabstracted.
        /// </summary>
        Reabstraction,

        /// <summary>
        /// The default implementation conflicts.
        /// </summary>
        Diamond,
    }
}
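The two new abstract members on `VirtualMethodAlgorithm` mirror the static helpers shown earlier on `MetadataVirtualMethodAlgorithm`. A speculative sketch (not the PR's code; `StaticVirtualAwareAlgorithm` is a hypothetical name) of how a derived algorithm could satisfy them by delegating to those helpers:

```csharp
using Internal.TypeSystem;

// Hedged sketch: declared abstract so the remaining members of
// VirtualMethodAlgorithm need not be repeated here.
public abstract class StaticVirtualAwareAlgorithm : VirtualMethodAlgorithm
{
    public override MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType)
    {
        // The metadata-based helper expects a MetadataType; real code would
        // validate the cast rather than assume it.
        return MetadataVirtualMethodAlgorithm.ResolveInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, (MetadataType)currentType);
    }

    public override MethodDesc ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, TypeDesc currentType)
    {
        return MetadataVirtualMethodAlgorithm.ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod, (MetadataType)currentType);
    }
}
```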
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Compilation.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; using System.Runtime.InteropServices; using ILCompiler.DependencyAnalysis; using ILCompiler.DependencyAnalysisFramework; using Internal.IL; using Internal.IL.Stubs; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public abstract class Compilation : ICompilation { protected readonly DependencyAnalyzerBase<NodeFactory> _dependencyGraph; protected readonly NodeFactory _nodeFactory; protected readonly Logger _logger; protected readonly DebugInformationProvider _debugInformationProvider; protected readonly DevirtualizationManager _devirtualizationManager; private readonly IInliningPolicy _inliningPolicy; public NameMangler NameMangler => _nodeFactory.NameMangler; public NodeFactory NodeFactory => _nodeFactory; public CompilerTypeSystemContext TypeSystemContext => NodeFactory.TypeSystemContext; public Logger Logger => _logger; public PInvokeILProvider PInvokeILProvider { get; } private readonly TypeGetTypeMethodThunkCache _typeGetTypeMethodThunks; private readonly AssemblyGetExecutingAssemblyMethodThunkCache _assemblyGetExecutingAssemblyMethodThunks; private readonly MethodBaseGetCurrentMethodThunkCache _methodBaseGetCurrentMethodThunks; protected Compilation( DependencyAnalyzerBase<NodeFactory> dependencyGraph, NodeFactory nodeFactory, IEnumerable<ICompilationRootProvider> compilationRoots, ILProvider ilProvider, DebugInformationProvider debugInformationProvider, DevirtualizationManager devirtualizationManager, IInliningPolicy inliningPolicy, Logger logger) { _dependencyGraph = dependencyGraph; _nodeFactory = nodeFactory; _logger = logger; _debugInformationProvider = debugInformationProvider; _devirtualizationManager = devirtualizationManager; _inliningPolicy = inliningPolicy; _dependencyGraph.ComputeDependencyRoutine += ComputeDependencyNodeDependencies; NodeFactory.AttachToDependencyGraph(_dependencyGraph); var rootingService = new RootingServiceProvider(nodeFactory, _dependencyGraph.AddRoot); foreach (var rootProvider in compilationRoots) rootProvider.AddCompilationRoots(rootingService); MetadataType globalModuleGeneratedType = nodeFactory.TypeSystemContext.GeneratedAssembly.GetGlobalModuleType(); _typeGetTypeMethodThunks = new TypeGetTypeMethodThunkCache(globalModuleGeneratedType); _assemblyGetExecutingAssemblyMethodThunks = new AssemblyGetExecutingAssemblyMethodThunkCache(globalModuleGeneratedType); _methodBaseGetCurrentMethodThunks = new MethodBaseGetCurrentMethodThunkCache(); PInvokeILProvider = _nodeFactory.InteropStubManager.CreatePInvokeILProvider(); if (PInvokeILProvider != null) { ilProvider = new CombinedILProvider(ilProvider, PInvokeILProvider); } _methodILCache = new ILCache(ilProvider); } private ILCache _methodILCache; public virtual MethodIL GetMethodIL(MethodDesc method) { // Flush the cache when it grows too big if (_methodILCache.Count > 1000) _methodILCache = new ILCache(_methodILCache.ILProvider); return _methodILCache.GetOrCreateValue(method).MethodIL; } protected abstract void ComputeDependencyNodeDependencies(List<DependencyNodeCore<NodeFactory>> obj); protected abstract void CompileInternal(string outputFile, ObjectDumper dumper); public void 
DetectGenericCycles(MethodDesc caller, MethodDesc callee) { _nodeFactory.TypeSystemContext.DetectGenericCycles(caller, callee); } public virtual IEETypeNode NecessaryTypeSymbolIfPossible(TypeDesc type) { return _nodeFactory.NecessaryTypeSymbol(type); } public bool CanInline(MethodDesc caller, MethodDesc callee) { return _inliningPolicy.CanInline(caller, callee); } public bool CanConstructType(TypeDesc type) { return _devirtualizationManager.CanConstructType(type); } public DelegateCreationInfo GetDelegateCtor(TypeDesc delegateType, MethodDesc target, bool followVirtualDispatch) { // If we're creating a delegate to a virtual method that cannot be overriden, devirtualize. // This is not just an optimization - it's required for correctness in the presence of sealed // vtable slots. if (followVirtualDispatch && (target.IsFinal || target.OwningType.IsSealed())) followVirtualDispatch = false; if (followVirtualDispatch) target = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(target); return DelegateCreationInfo.Create(delegateType, target, NodeFactory, followVirtualDispatch); } /// <summary> /// Gets an object representing the static data for RVA mapped fields from the PE image. /// </summary> public virtual ISymbolNode GetFieldRvaData(FieldDesc field) { if (field.GetType() == typeof(PInvokeLazyFixupField)) { return NodeFactory.PInvokeMethodFixup(new PInvokeMethodData((PInvokeLazyFixupField)field)); } else if (field is ExternSymbolMappedField externField) { return NodeFactory.ExternSymbol(externField.SymbolName); } else { // Use the typical field definition in case this is an instantiated generic type field = field.GetTypicalFieldDefinition(); int fieldTypePack = (field.FieldType as MetadataType)?.GetClassLayout().PackingSize ?? 1; return NodeFactory.ReadOnlyDataBlob(NameMangler.GetMangledFieldName(field), ((EcmaField)field).GetFieldRvaData(), Math.Max(NodeFactory.Target.PointerSize, fieldTypePack)); } } public bool HasLazyStaticConstructor(TypeDesc type) { return NodeFactory.PreinitializationManager.HasLazyStaticConstructor(type); } public MethodDebugInformation GetDebugInfo(MethodIL methodIL) { return _debugInformationProvider.GetDebugInfo(methodIL); } /// <summary> /// Resolves a reference to an intrinsic method to a new method that takes it's place in the compilation. /// This is used for intrinsics where the intrinsic expansion depends on the callsite. 
/// </summary> /// <param name="intrinsicMethod">The intrinsic method called.</param> /// <param name="callsiteMethod">The callsite that calls the intrinsic.</param> /// <returns>The intrinsic implementation to be called for this specific callsite.</returns> public MethodDesc ExpandIntrinsicForCallsite(MethodDesc intrinsicMethod, MethodDesc callsiteMethod) { Debug.Assert(intrinsicMethod.IsIntrinsic); var intrinsicOwningType = intrinsicMethod.OwningType as MetadataType; if (intrinsicOwningType == null) return intrinsicMethod; if (intrinsicOwningType.Module != TypeSystemContext.SystemModule) return intrinsicMethod; if (intrinsicOwningType.Name == "Type" && intrinsicOwningType.Namespace == "System") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetType") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _typeGetTypeMethodThunks.GetHelper(intrinsicMethod, ((IAssemblyDesc)callsiteModule).GetName().FullName); } } } else if (intrinsicOwningType.Name == "Assembly" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetExecutingAssembly") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _assemblyGetExecutingAssemblyMethodThunks.GetHelper((IAssemblyDesc)callsiteModule); } } } else if (intrinsicOwningType.Name == "MethodBase" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetCurrentMethod") { return _methodBaseGetCurrentMethodThunks.GetHelper(callsiteMethod).InstantiateAsOpen(); } } return intrinsicMethod; } public bool HasFixedSlotVTable(TypeDesc type) { return NodeFactory.VTable(type).HasFixedSlots; } public bool IsEffectivelySealed(TypeDesc type) { return _devirtualizationManager.IsEffectivelySealed(type); } public bool IsEffectivelySealed(MethodDesc method) { return _devirtualizationManager.IsEffectivelySealed(method); } public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail) { return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail); } public bool NeedsRuntimeLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.NecessaryTypeHandle: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ObjectAllocator: return ((TypeDesc)targetOfLookup).IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodHandle: return ((MethodDesc)targetOfLookup).IsRuntimeDeterminedExactMethod; case ReadyToRunHelperId.FieldHandle: return ((FieldDesc)targetOfLookup).OwningType.IsRuntimeDeterminedSubtype; default: throw new NotImplementedException(); } } public ReadyToRunHelperId GetLdTokenHelperForType(TypeDesc type) { bool canConstructPerWholeProgramAnalysis = _devirtualizationManager == null ? true : _devirtualizationManager.CanConstructType(type); return canConstructPerWholeProgramAnalysis & DependencyAnalysis.ConstructedEETypeNode.CreationAllowed(type) ? 
ReadyToRunHelperId.TypeHandle : ReadyToRunHelperId.NecessaryTypeHandle; } public static MethodDesc GetConstructorForCreateInstanceIntrinsic(TypeDesc type) { MethodDesc ctor = type.GetDefaultConstructor(); if (ctor == null) { MetadataType activatorType = type.Context.SystemModule.GetKnownType("System", "Activator"); if (type.IsValueType && type.GetParameterlessConstructor() == null) { ctor = activatorType.GetKnownNestedType("StructWithNoConstructor").GetKnownMethod(".ctor", null); } else { ctor = activatorType.GetKnownMethod("MissingConstructorMethod", null); } } return ctor; } public ISymbolNode ComputeConstantLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: return NodeFactory.ConstructedTypeSymbol((TypeDesc)targetOfLookup); case ReadyToRunHelperId.NecessaryTypeHandle: return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); case ReadyToRunHelperId.TypeHandleForCasting: { var type = (TypeDesc)targetOfLookup; if (type.IsNullable) targetOfLookup = type.Instantiation[0]; return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); } case ReadyToRunHelperId.MethodDictionary: return NodeFactory.MethodGenericDictionary((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodEntry: return NodeFactory.FatFunctionPointer((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodHandle: return NodeFactory.RuntimeMethodHandle((MethodDesc)targetOfLookup); case ReadyToRunHelperId.FieldHandle: return NodeFactory.RuntimeFieldHandle((FieldDesc)targetOfLookup); case ReadyToRunHelperId.DefaultConstructor: { var type = (TypeDesc)targetOfLookup; MethodDesc ctor = GetConstructorForCreateInstanceIntrinsic(type); return NodeFactory.CanonicalEntrypoint(ctor); } case ReadyToRunHelperId.ObjectAllocator: { var type = (TypeDesc)targetOfLookup; return NodeFactory.ExternSymbol(JitHelper.GetNewObjectHelperForType(type)); } default: throw new NotImplementedException(); } } public GenericDictionaryLookup ComputeGenericLookup(MethodDesc contextMethod, ReadyToRunHelperId lookupKind, object targetOfLookup) { if (targetOfLookup is TypeSystemEntity typeSystemEntity) { _nodeFactory.TypeSystemContext.DetectGenericCycles(contextMethod, typeSystemEntity); } GenericContextSource contextSource; if (contextMethod.RequiresInstMethodDescArg()) { contextSource = GenericContextSource.MethodParameter; } else if (contextMethod.RequiresInstMethodTableArg()) { contextSource = GenericContextSource.TypeParameter; } else { Debug.Assert(contextMethod.AcquiresInstMethodTableFromThis()); contextSource = GenericContextSource.ThisObject; } // // Some helpers represent logical concepts that might not be something that can be looked up in a dictionary // // Downgrade type handle for casting to a normal type handle if possible if (lookupKind == ReadyToRunHelperId.TypeHandleForCasting) { var type = (TypeDesc)targetOfLookup; if (!type.IsRuntimeDeterminedType || (!((RuntimeDeterminedType)type).CanonicalType.IsCanonicalDefinitionType(CanonicalFormKind.Universal) && !((RuntimeDeterminedType)type).CanonicalType.IsNullable)) { if (type.IsNullable) { targetOfLookup = type.Instantiation[0]; } lookupKind = ReadyToRunHelperId.NecessaryTypeHandle; } } // We don't have separate entries for necessary type handles to avoid possible duplication if (lookupKind == ReadyToRunHelperId.NecessaryTypeHandle) { lookupKind = ReadyToRunHelperId.TypeHandle; } // Can we do a fixed lookup? Start by checking if we can get to the dictionary. 
// Context source having a vtable with fixed slots is a prerequisite. if (contextSource == GenericContextSource.MethodParameter || HasFixedSlotVTable(contextMethod.OwningType)) { DictionaryLayoutNode dictionaryLayout; if (contextSource == GenericContextSource.MethodParameter) dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod); else dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod.OwningType); // If the dictionary layout has fixed slots, we can compute the lookup now. Otherwise defer to helper. if (dictionaryLayout.HasFixedSlots) { int pointerSize = _nodeFactory.Target.PointerSize; GenericLookupResult lookup = ReadyToRunGenericHelperNode.GetLookupSignature(_nodeFactory, lookupKind, targetOfLookup); int dictionarySlot = dictionaryLayout.GetSlotForFixedEntry(lookup); if (dictionarySlot != -1) { int dictionaryOffset = dictionarySlot * pointerSize; bool indirectLastOffset = lookup.LookupResultReferenceType(_nodeFactory) == GenericLookupResultReferenceType.Indirect; if (contextSource == GenericContextSource.MethodParameter) { return GenericDictionaryLookup.CreateFixedLookup(contextSource, dictionaryOffset, indirectLastOffset: indirectLastOffset); } else { int vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(_nodeFactory, contextMethod.OwningType); int vtableOffset = EETypeNode.GetVTableOffset(pointerSize) + vtableSlot * pointerSize; return GenericDictionaryLookup.CreateFixedLookup(contextSource, vtableOffset, dictionaryOffset, indirectLastOffset: indirectLastOffset); } } } } // Fixed lookup not possible - use helper. return GenericDictionaryLookup.CreateHelperLookup(contextSource, lookupKind, targetOfLookup); } public bool IsFatPointerCandidate(MethodDesc containingMethod, MethodSignature signature) { // Unmanaged calls are never fat pointers if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) != 0) return false; if (containingMethod.OwningType is MetadataType owningType) { // RawCalliHelper is a way for the class library to opt out of fat calls if (owningType.Name == "RawCalliHelper") return false; // Delegate invocation never needs fat calls if (owningType.IsDelegate && containingMethod.Name == "Invoke") return false; } return true; } /// <summary> /// Retreives method whose runtime handle is suitable for use with GVMLookupForSlot. /// </summary> public MethodDesc GetTargetOfGenericVirtualMethodCall(MethodDesc calledMethod) { // Should be a generic virtual method Debug.Assert(calledMethod.HasInstantiation && calledMethod.IsVirtual); // Needs to be either a concrete method, or a runtime determined form. Debug.Assert(!calledMethod.IsCanonicalMethod(CanonicalFormKind.Specific)); MethodDesc targetMethod = calledMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); MethodDesc targetMethodDefinition = targetMethod.GetMethodDefinition(); MethodDesc slotNormalizedMethodDefinition = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethodDefinition); // If the method defines the slot, we can use that. if (slotNormalizedMethodDefinition == targetMethodDefinition) { return calledMethod; } // Normalize to the slot defining method MethodDesc slotNormalizedMethod = TypeSystemContext.GetInstantiatedMethod( slotNormalizedMethodDefinition, targetMethod.Instantiation); // Since the slot normalization logic modified what method we're looking at, we need to compute the new target of lookup. 
// // If we could use virtual method resolution logic with runtime determined methods, we wouldn't need what we're going // to do below. MethodDesc runtimeDeterminedSlotNormalizedMethod; if (!slotNormalizedMethod.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // If the owning type is not generic, we can use it as-is, potentially only replacing the runtime-determined // method instantiation part. runtimeDeterminedSlotNormalizedMethod = slotNormalizedMethod.GetMethodDefinition(); } else { // If we need a runtime lookup but a normalization to the slot defining method happened above, we need to compute // the runtime lookup in terms of the base type that introduced the slot. // // To do that, we walk the base hierarchy of the runtime determined thing, looking for a type definition that matches // the slot-normalized virtual method. We then find the method on that type. TypeDesc runtimeDeterminedOwningType = calledMethod.OwningType; Debug.Assert(!runtimeDeterminedOwningType.IsInterface); while (!slotNormalizedMethod.OwningType.HasSameTypeDefinition(runtimeDeterminedOwningType)) { TypeDesc runtimeDeterminedBaseTypeDefinition = runtimeDeterminedOwningType.GetTypeDefinition().BaseType; if (runtimeDeterminedBaseTypeDefinition.HasInstantiation) { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition.InstantiateSignature(runtimeDeterminedOwningType.Instantiation, default); } else { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition; } } // Now get the method on the newly found type Debug.Assert(runtimeDeterminedOwningType.HasInstantiation); runtimeDeterminedSlotNormalizedMethod = TypeSystemContext.GetMethodForInstantiatedType( slotNormalizedMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedOwningType); } return TypeSystemContext.GetInstantiatedMethod(runtimeDeterminedSlotNormalizedMethod, calledMethod.Instantiation); } CompilationResults ICompilation.Compile(string outputFile, ObjectDumper dumper) { if (dumper != null) { dumper.Begin(); } CompileInternal(outputFile, dumper); if (dumper != null) { dumper.End(); } return new CompilationResults(_dependencyGraph, _nodeFactory); } private sealed class ILCache : LockFreeReaderHashtable<MethodDesc, ILCache.MethodILData> { public ILProvider ILProvider { get; } public ILCache(ILProvider provider) { ILProvider = provider; } protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodILData value) { return value.Method.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodILData value) { return Object.ReferenceEquals(key, value.Method); } protected override bool CompareValueToValue(MethodILData value1, MethodILData value2) { return Object.ReferenceEquals(value1.Method, value2.Method); } protected override MethodILData CreateValueFromKey(MethodDesc key) { return new MethodILData() { Method = key, MethodIL = ILProvider.GetMethodIL(key) }; } internal class MethodILData { public MethodDesc Method; public MethodIL MethodIL; } } private sealed class CombinedILProvider : ILProvider { private readonly ILProvider _primaryILProvider; private readonly PInvokeILProvider _pinvokeProvider; public CombinedILProvider(ILProvider primaryILProvider, PInvokeILProvider pinvokeILProvider) { _primaryILProvider = primaryILProvider; _pinvokeProvider = pinvokeILProvider; } public override MethodIL GetMethodIL(MethodDesc method) { MethodIL result = _primaryILProvider.GetMethodIL(method); if (result == null && method.IsPInvoke) result = 
_pinvokeProvider.GetMethodIL(method); return result; } } } // Interface under which Compilation is exposed externally. public interface ICompilation { CompilationResults Compile(string outputFileName, ObjectDumper dumper); } public class CompilationResults { private readonly DependencyAnalyzerBase<NodeFactory> _graph; protected readonly NodeFactory _factory; protected ImmutableArray<DependencyNodeCore<NodeFactory>> MarkedNodes { get { return _graph.MarkedNodeList; } } internal CompilationResults(DependencyAnalyzerBase<NodeFactory> graph, NodeFactory factory) { _graph = graph; _factory = factory; } public void WriteDependencyLog(string fileName) { using (FileStream dgmlOutput = new FileStream(fileName, FileMode.Create)) { DgmlWriter.WriteDependencyGraphToStream(dgmlOutput, _graph, _factory); dgmlOutput.Flush(); } } public IEnumerable<MethodDesc> CompiledMethodBodies { get { foreach (var node in MarkedNodes) { if (node is IMethodBodyNode) yield return ((IMethodBodyNode)node).Method; } } } public IEnumerable<TypeDesc> ConstructedEETypes { get { foreach (var node in MarkedNodes) { if (node is ConstructedEETypeNode || node is CanonicalEETypeNode) { yield return ((IEETypeNode)node).Type; } } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; using System.Runtime.InteropServices; using ILCompiler.DependencyAnalysis; using ILCompiler.DependencyAnalysisFramework; using Internal.IL; using Internal.IL.Stubs; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public abstract class Compilation : ICompilation { protected readonly DependencyAnalyzerBase<NodeFactory> _dependencyGraph; protected readonly NodeFactory _nodeFactory; protected readonly Logger _logger; protected readonly DebugInformationProvider _debugInformationProvider; protected readonly DevirtualizationManager _devirtualizationManager; private readonly IInliningPolicy _inliningPolicy; public NameMangler NameMangler => _nodeFactory.NameMangler; public NodeFactory NodeFactory => _nodeFactory; public CompilerTypeSystemContext TypeSystemContext => NodeFactory.TypeSystemContext; public Logger Logger => _logger; public PInvokeILProvider PInvokeILProvider { get; } private readonly TypeGetTypeMethodThunkCache _typeGetTypeMethodThunks; private readonly AssemblyGetExecutingAssemblyMethodThunkCache _assemblyGetExecutingAssemblyMethodThunks; private readonly MethodBaseGetCurrentMethodThunkCache _methodBaseGetCurrentMethodThunks; protected Compilation( DependencyAnalyzerBase<NodeFactory> dependencyGraph, NodeFactory nodeFactory, IEnumerable<ICompilationRootProvider> compilationRoots, ILProvider ilProvider, DebugInformationProvider debugInformationProvider, DevirtualizationManager devirtualizationManager, IInliningPolicy inliningPolicy, Logger logger) { _dependencyGraph = dependencyGraph; _nodeFactory = nodeFactory; _logger = logger; _debugInformationProvider = debugInformationProvider; _devirtualizationManager = devirtualizationManager; _inliningPolicy = inliningPolicy; _dependencyGraph.ComputeDependencyRoutine += ComputeDependencyNodeDependencies; NodeFactory.AttachToDependencyGraph(_dependencyGraph); var rootingService = new RootingServiceProvider(nodeFactory, _dependencyGraph.AddRoot); foreach (var rootProvider in compilationRoots) rootProvider.AddCompilationRoots(rootingService); MetadataType globalModuleGeneratedType = nodeFactory.TypeSystemContext.GeneratedAssembly.GetGlobalModuleType(); _typeGetTypeMethodThunks = new TypeGetTypeMethodThunkCache(globalModuleGeneratedType); _assemblyGetExecutingAssemblyMethodThunks = new AssemblyGetExecutingAssemblyMethodThunkCache(globalModuleGeneratedType); _methodBaseGetCurrentMethodThunks = new MethodBaseGetCurrentMethodThunkCache(); PInvokeILProvider = _nodeFactory.InteropStubManager.CreatePInvokeILProvider(); if (PInvokeILProvider != null) { ilProvider = new CombinedILProvider(ilProvider, PInvokeILProvider); } _methodILCache = new ILCache(ilProvider); } private ILCache _methodILCache; public virtual MethodIL GetMethodIL(MethodDesc method) { // Flush the cache when it grows too big if (_methodILCache.Count > 1000) _methodILCache = new ILCache(_methodILCache.ILProvider); return _methodILCache.GetOrCreateValue(method).MethodIL; } protected abstract void ComputeDependencyNodeDependencies(List<DependencyNodeCore<NodeFactory>> obj); protected abstract void CompileInternal(string outputFile, ObjectDumper dumper); public void 
DetectGenericCycles(MethodDesc caller, MethodDesc callee) { _nodeFactory.TypeSystemContext.DetectGenericCycles(caller, callee); } public virtual IEETypeNode NecessaryTypeSymbolIfPossible(TypeDesc type) { return _nodeFactory.NecessaryTypeSymbol(type); } public bool CanInline(MethodDesc caller, MethodDesc callee) { return _inliningPolicy.CanInline(caller, callee); } public bool CanConstructType(TypeDesc type) { return _devirtualizationManager.CanConstructType(type); } public DelegateCreationInfo GetDelegateCtor(TypeDesc delegateType, MethodDesc target, bool followVirtualDispatch) { // If we're creating a delegate to a virtual method that cannot be overridden, devirtualize. // This is not just an optimization - it's required for correctness in the presence of sealed // vtable slots. if (followVirtualDispatch && (target.IsFinal || target.OwningType.IsSealed())) followVirtualDispatch = false; if (followVirtualDispatch) target = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(target); return DelegateCreationInfo.Create(delegateType, target, NodeFactory, followVirtualDispatch); } /// <summary> /// Gets an object representing the static data for RVA mapped fields from the PE image. /// </summary> public virtual ISymbolNode GetFieldRvaData(FieldDesc field) { if (field.GetType() == typeof(PInvokeLazyFixupField)) { return NodeFactory.PInvokeMethodFixup(new PInvokeMethodData((PInvokeLazyFixupField)field)); } else if (field is ExternSymbolMappedField externField) { return NodeFactory.ExternSymbol(externField.SymbolName); } else { // Use the typical field definition in case this is an instantiated generic type field = field.GetTypicalFieldDefinition(); int fieldTypePack = (field.FieldType as MetadataType)?.GetClassLayout().PackingSize ?? 1; return NodeFactory.ReadOnlyDataBlob(NameMangler.GetMangledFieldName(field), ((EcmaField)field).GetFieldRvaData(), Math.Max(NodeFactory.Target.PointerSize, fieldTypePack)); } } public bool HasLazyStaticConstructor(TypeDesc type) { return NodeFactory.PreinitializationManager.HasLazyStaticConstructor(type); } public MethodDebugInformation GetDebugInfo(MethodIL methodIL) { return _debugInformationProvider.GetDebugInfo(methodIL); } /// <summary> /// Resolves a reference to an intrinsic method to a new method that takes its place in the compilation. /// This is used for intrinsics where the intrinsic expansion depends on the callsite.
/// </summary> /// <param name="intrinsicMethod">The intrinsic method called.</param> /// <param name="callsiteMethod">The callsite that calls the intrinsic.</param> /// <returns>The intrinsic implementation to be called for this specific callsite.</returns> public MethodDesc ExpandIntrinsicForCallsite(MethodDesc intrinsicMethod, MethodDesc callsiteMethod) { Debug.Assert(intrinsicMethod.IsIntrinsic); var intrinsicOwningType = intrinsicMethod.OwningType as MetadataType; if (intrinsicOwningType == null) return intrinsicMethod; if (intrinsicOwningType.Module != TypeSystemContext.SystemModule) return intrinsicMethod; if (intrinsicOwningType.Name == "Type" && intrinsicOwningType.Namespace == "System") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetType") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _typeGetTypeMethodThunks.GetHelper(intrinsicMethod, ((IAssemblyDesc)callsiteModule).GetName().FullName); } } } else if (intrinsicOwningType.Name == "Assembly" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetExecutingAssembly") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _assemblyGetExecutingAssemblyMethodThunks.GetHelper((IAssemblyDesc)callsiteModule); } } } else if (intrinsicOwningType.Name == "MethodBase" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetCurrentMethod") { return _methodBaseGetCurrentMethodThunks.GetHelper(callsiteMethod).InstantiateAsOpen(); } } return intrinsicMethod; } public bool HasFixedSlotVTable(TypeDesc type) { return NodeFactory.VTable(type).HasFixedSlots; } public bool IsEffectivelySealed(TypeDesc type) { return _devirtualizationManager.IsEffectivelySealed(type); } public bool IsEffectivelySealed(MethodDesc method) { return _devirtualizationManager.IsEffectivelySealed(method); } public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail) { return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail); } public bool NeedsRuntimeLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.NecessaryTypeHandle: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ObjectAllocator: return ((TypeDesc)targetOfLookup).IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodHandle: return ((MethodDesc)targetOfLookup).IsRuntimeDeterminedExactMethod; case ReadyToRunHelperId.FieldHandle: return ((FieldDesc)targetOfLookup).OwningType.IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.ConstrainedDirectCall: return ((ConstrainedCallInfo)targetOfLookup).Method.IsRuntimeDeterminedExactMethod || ((ConstrainedCallInfo)targetOfLookup).ConstrainedType.IsRuntimeDeterminedSubtype; default: throw new NotImplementedException(); } } public ReadyToRunHelperId GetLdTokenHelperForType(TypeDesc type) { bool canConstructPerWholeProgramAnalysis 
= _devirtualizationManager == null ? true : _devirtualizationManager.CanConstructType(type); return canConstructPerWholeProgramAnalysis & DependencyAnalysis.ConstructedEETypeNode.CreationAllowed(type) ? ReadyToRunHelperId.TypeHandle : ReadyToRunHelperId.NecessaryTypeHandle; } public static MethodDesc GetConstructorForCreateInstanceIntrinsic(TypeDesc type) { MethodDesc ctor = type.GetDefaultConstructor(); if (ctor == null) { MetadataType activatorType = type.Context.SystemModule.GetKnownType("System", "Activator"); if (type.IsValueType && type.GetParameterlessConstructor() == null) { ctor = activatorType.GetKnownNestedType("StructWithNoConstructor").GetKnownMethod(".ctor", null); } else { ctor = activatorType.GetKnownMethod("MissingConstructorMethod", null); } } return ctor; } public ISymbolNode ComputeConstantLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: return NodeFactory.ConstructedTypeSymbol((TypeDesc)targetOfLookup); case ReadyToRunHelperId.NecessaryTypeHandle: return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); case ReadyToRunHelperId.TypeHandleForCasting: { var type = (TypeDesc)targetOfLookup; if (type.IsNullable) targetOfLookup = type.Instantiation[0]; return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); } case ReadyToRunHelperId.MethodDictionary: return NodeFactory.MethodGenericDictionary((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodEntry: return NodeFactory.FatFunctionPointer((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodHandle: return NodeFactory.RuntimeMethodHandle((MethodDesc)targetOfLookup); case ReadyToRunHelperId.FieldHandle: return NodeFactory.RuntimeFieldHandle((FieldDesc)targetOfLookup); case ReadyToRunHelperId.DefaultConstructor: { var type = (TypeDesc)targetOfLookup; MethodDesc ctor = GetConstructorForCreateInstanceIntrinsic(type); return NodeFactory.CanonicalEntrypoint(ctor); } case ReadyToRunHelperId.ObjectAllocator: { var type = (TypeDesc)targetOfLookup; return NodeFactory.ExternSymbol(JitHelper.GetNewObjectHelperForType(type)); } default: throw new NotImplementedException(); } } public GenericDictionaryLookup ComputeGenericLookup(MethodDesc contextMethod, ReadyToRunHelperId lookupKind, object targetOfLookup) { if (targetOfLookup is TypeSystemEntity typeSystemEntity) { _nodeFactory.TypeSystemContext.DetectGenericCycles(contextMethod, typeSystemEntity); } GenericContextSource contextSource; if (contextMethod.RequiresInstMethodDescArg()) { contextSource = GenericContextSource.MethodParameter; } else if (contextMethod.RequiresInstMethodTableArg()) { contextSource = GenericContextSource.TypeParameter; } else { Debug.Assert(contextMethod.AcquiresInstMethodTableFromThis()); contextSource = GenericContextSource.ThisObject; } // // Some helpers represent logical concepts that might not be something that can be looked up in a dictionary // // Downgrade type handle for casting to a normal type handle if possible if (lookupKind == ReadyToRunHelperId.TypeHandleForCasting) { var type = (TypeDesc)targetOfLookup; if (!type.IsRuntimeDeterminedType || (!((RuntimeDeterminedType)type).CanonicalType.IsCanonicalDefinitionType(CanonicalFormKind.Universal) && !((RuntimeDeterminedType)type).CanonicalType.IsNullable)) { if (type.IsNullable) { targetOfLookup = type.Instantiation[0]; } lookupKind = ReadyToRunHelperId.NecessaryTypeHandle; } } // We don't have separate entries for necessary type handles to avoid possible duplication if (lookupKind == 
ReadyToRunHelperId.NecessaryTypeHandle) { lookupKind = ReadyToRunHelperId.TypeHandle; } // Can we do a fixed lookup? Start by checking if we can get to the dictionary. // Context source having a vtable with fixed slots is a prerequisite. if (contextSource == GenericContextSource.MethodParameter || HasFixedSlotVTable(contextMethod.OwningType)) { DictionaryLayoutNode dictionaryLayout; if (contextSource == GenericContextSource.MethodParameter) dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod); else dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod.OwningType); // If the dictionary layout has fixed slots, we can compute the lookup now. Otherwise defer to helper. if (dictionaryLayout.HasFixedSlots) { int pointerSize = _nodeFactory.Target.PointerSize; GenericLookupResult lookup = ReadyToRunGenericHelperNode.GetLookupSignature(_nodeFactory, lookupKind, targetOfLookup); int dictionarySlot = dictionaryLayout.GetSlotForFixedEntry(lookup); if (dictionarySlot != -1) { int dictionaryOffset = dictionarySlot * pointerSize; bool indirectLastOffset = lookup.LookupResultReferenceType(_nodeFactory) == GenericLookupResultReferenceType.Indirect; if (contextSource == GenericContextSource.MethodParameter) { return GenericDictionaryLookup.CreateFixedLookup(contextSource, dictionaryOffset, indirectLastOffset: indirectLastOffset); } else { int vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(_nodeFactory, contextMethod.OwningType); int vtableOffset = EETypeNode.GetVTableOffset(pointerSize) + vtableSlot * pointerSize; return GenericDictionaryLookup.CreateFixedLookup(contextSource, vtableOffset, dictionaryOffset, indirectLastOffset: indirectLastOffset); } } } } // Fixed lookup not possible - use helper. return GenericDictionaryLookup.CreateHelperLookup(contextSource, lookupKind, targetOfLookup); } public bool IsFatPointerCandidate(MethodDesc containingMethod, MethodSignature signature) { // Unmanaged calls are never fat pointers if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) != 0) return false; if (containingMethod.OwningType is MetadataType owningType) { // RawCalliHelper is a way for the class library to opt out of fat calls if (owningType.Name == "RawCalliHelper") return false; // Delegate invocation never needs fat calls if (owningType.IsDelegate && containingMethod.Name == "Invoke") return false; } return true; } /// <summary> /// Retrieves the method whose runtime handle is suitable for use with GVMLookupForSlot. /// </summary> public MethodDesc GetTargetOfGenericVirtualMethodCall(MethodDesc calledMethod) { // Should be a generic virtual method Debug.Assert(calledMethod.HasInstantiation && calledMethod.IsVirtual); // Needs to be either a concrete method, or a runtime determined form. Debug.Assert(!calledMethod.IsCanonicalMethod(CanonicalFormKind.Specific)); MethodDesc targetMethod = calledMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); MethodDesc targetMethodDefinition = targetMethod.GetMethodDefinition(); MethodDesc slotNormalizedMethodDefinition = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethodDefinition); // If the method defines the slot, we can use that.
if (slotNormalizedMethodDefinition == targetMethodDefinition) { return calledMethod; } // Normalize to the slot defining method MethodDesc slotNormalizedMethod = TypeSystemContext.GetInstantiatedMethod( slotNormalizedMethodDefinition, targetMethod.Instantiation); // Since the slot normalization logic modified what method we're looking at, we need to compute the new target of lookup. // // If we could use virtual method resolution logic with runtime determined methods, we wouldn't need what we're going // to do below. MethodDesc runtimeDeterminedSlotNormalizedMethod; if (!slotNormalizedMethod.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // If the owning type is not generic, we can use it as-is, potentially only replacing the runtime-determined // method instantiation part. runtimeDeterminedSlotNormalizedMethod = slotNormalizedMethod.GetMethodDefinition(); } else { // If we need a runtime lookup but a normalization to the slot defining method happened above, we need to compute // the runtime lookup in terms of the base type that introduced the slot. // // To do that, we walk the base hierarchy of the runtime determined thing, looking for a type definition that matches // the slot-normalized virtual method. We then find the method on that type. TypeDesc runtimeDeterminedOwningType = calledMethod.OwningType; Debug.Assert(!runtimeDeterminedOwningType.IsInterface); while (!slotNormalizedMethod.OwningType.HasSameTypeDefinition(runtimeDeterminedOwningType)) { TypeDesc runtimeDeterminedBaseTypeDefinition = runtimeDeterminedOwningType.GetTypeDefinition().BaseType; if (runtimeDeterminedBaseTypeDefinition.HasInstantiation) { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition.InstantiateSignature(runtimeDeterminedOwningType.Instantiation, default); } else { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition; } } // Now get the method on the newly found type Debug.Assert(runtimeDeterminedOwningType.HasInstantiation); runtimeDeterminedSlotNormalizedMethod = TypeSystemContext.GetMethodForInstantiatedType( slotNormalizedMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedOwningType); } return TypeSystemContext.GetInstantiatedMethod(runtimeDeterminedSlotNormalizedMethod, calledMethod.Instantiation); } CompilationResults ICompilation.Compile(string outputFile, ObjectDumper dumper) { if (dumper != null) { dumper.Begin(); } CompileInternal(outputFile, dumper); if (dumper != null) { dumper.End(); } return new CompilationResults(_dependencyGraph, _nodeFactory); } private sealed class ILCache : LockFreeReaderHashtable<MethodDesc, ILCache.MethodILData> { public ILProvider ILProvider { get; } public ILCache(ILProvider provider) { ILProvider = provider; } protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodILData value) { return value.Method.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodILData value) { return Object.ReferenceEquals(key, value.Method); } protected override bool CompareValueToValue(MethodILData value1, MethodILData value2) { return Object.ReferenceEquals(value1.Method, value2.Method); } protected override MethodILData CreateValueFromKey(MethodDesc key) { return new MethodILData() { Method = key, MethodIL = ILProvider.GetMethodIL(key) }; } internal class MethodILData { public MethodDesc Method; public MethodIL MethodIL; } } private sealed class CombinedILProvider : ILProvider { private readonly ILProvider _primaryILProvider; 
private readonly PInvokeILProvider _pinvokeProvider; public CombinedILProvider(ILProvider primaryILProvider, PInvokeILProvider pinvokeILProvider) { _primaryILProvider = primaryILProvider; _pinvokeProvider = pinvokeILProvider; } public override MethodIL GetMethodIL(MethodDesc method) { MethodIL result = _primaryILProvider.GetMethodIL(method); if (result == null && method.IsPInvoke) result = _pinvokeProvider.GetMethodIL(method); return result; } } } // Interface under which Compilation is exposed externally. public interface ICompilation { CompilationResults Compile(string outputFileName, ObjectDumper dumper); } public class CompilationResults { private readonly DependencyAnalyzerBase<NodeFactory> _graph; protected readonly NodeFactory _factory; protected ImmutableArray<DependencyNodeCore<NodeFactory>> MarkedNodes { get { return _graph.MarkedNodeList; } } internal CompilationResults(DependencyAnalyzerBase<NodeFactory> graph, NodeFactory factory) { _graph = graph; _factory = factory; } public void WriteDependencyLog(string fileName) { using (FileStream dgmlOutput = new FileStream(fileName, FileMode.Create)) { DgmlWriter.WriteDependencyGraphToStream(dgmlOutput, _graph, _factory); dgmlOutput.Flush(); } } public IEnumerable<MethodDesc> CompiledMethodBodies { get { foreach (var node in MarkedNodes) { if (node is IMethodBodyNode) yield return ((IMethodBodyNode)node).Method; } } } public IEnumerable<TypeDesc> ConstructedEETypes { get { foreach (var node in MarkedNodes) { if (node is ConstructedEETypeNode || node is CanonicalEETypeNode) { yield return ((IEETypeNode)node).Type; } } } } } public sealed class ConstrainedCallInfo { public readonly TypeDesc ConstrainedType; public readonly MethodDesc Method; public ConstrainedCallInfo(TypeDesc constrainedType, MethodDesc method) => (ConstrainedType, Method) = (constrainedType, method); } }
1
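For orientation, a minimal driver sketch (not part of the dataset record above) of how the `ICompilation`/`CompilationResults` surface shown in this record is typically exercised. Only `Compile`, `WriteDependencyLog`, `CompiledMethodBodies`, and `ConstructedEETypes` come from the record; how the `ICompilation` instance is obtained is an assumption, and `compilationBuilder` is a hypothetical placeholder for whatever builder the host sets up.
// Hypothetical driver sketch; compilationBuilder is assumed, not from the record.
ICompilation compilation = compilationBuilder.ToCompilation();
// Compile writes the output object file and returns the marked dependency graph.
CompilationResults results = compilation.Compile("app.obj", dumper: null);
// Optionally dump the dependency graph as DGML for size investigations.
results.WriteDependencyLog("app.dgml.xml");
// Enumerate what was actually generated: method bodies and constructed MethodTables.
foreach (MethodDesc compiledMethod in results.CompiledMethodBodies)
    Console.WriteLine(compiledMethod);
foreach (TypeDesc constructedType in results.ConstructedEETypes)
    Console.WriteLine(constructedType);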
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
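The description above centers on resolving constrained calls to static virtual (static abstract) interface methods. As a hedged illustration of the language feature involved, not code from the PR, the C# shape looks like this; all type and member names here are hypothetical:
// Illustrative only: a "static virtual" interface method and the constrained
// call site the AOT compiler has to resolve.
public interface IParser<TSelf> where TSelf : IParser<TSelf>
{
    static abstract TSelf Parse(string text); // static virtual method
}

public struct Point : IParser<Point>
{
    public static Point Parse(string text) => default; // stub implementation
}

public static class Demo
{
    // T.Parse(...) compiles to a constrained call on the type parameter;
    // resolving it, either at compile time or via a generic dictionary lookup
    // when T is shared/canonical, is what the compiler support has to handle.
    public static T ParseAny<T>(string text) where T : IParser<T> => T.Parse(text);
}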
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/EETypeNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Internal.IL; using Internal.Runtime; using Internal.Text; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; using GenericVariance = Internal.Runtime.GenericVariance; namespace ILCompiler.DependencyAnalysis { /// <summary> /// Given a type, EETypeNode writes a MethodTable data structure in the format expected by the runtime. /// /// Format of a MethodTable: /// /// Field Size | Contents /// ----------------+----------------------------------- /// UInt16 | Component Size. For arrays this is the element type size, for strings it is 2 (.NET uses /// | UTF16 character encoding), for generic type definitions it is the number of generic parameters, /// | and 0 for all other types. /// | /// UInt16 | EETypeKind (Normal, Array, Pointer type). Flags for: IsValueType, IsCrossModule, HasPointers, /// | HasOptionalFields, IsInterface, IsGeneric. Top 5 bits are used for enum EETypeElementType to /// | record whether it's backed by an Int32, Int16 etc /// | /// UInt32 | Base size. /// | /// [Pointer Size] | Related type. Base type for regular types. Element type for arrays / pointer types. /// | /// UInt16 | Number of VTable slots (X) /// | /// UInt16 | Number of interfaces implemented by type (Y) /// | /// UInt32 | Hash code /// | /// X * [Ptr Size] | VTable entries (optional) /// | /// Y * [Ptr Size] | Pointers to interface map data structures (optional) /// | /// [Relative ptr] | Pointer to containing TypeManager indirection cell /// | /// [Relative ptr] | Pointer to writable data /// | /// [Relative ptr] | Pointer to finalizer method (optional) /// | /// [Relative ptr] | Pointer to optional fields (optional) /// | /// [Relative ptr] | Pointer to the generic type definition MethodTable (optional) /// | /// [Relative ptr] | Pointer to the generic argument and variance info (optional) /// </summary> public partial class EETypeNode : ObjectNode, IEETypeNode, ISymbolDefinitionNode, ISymbolNodeWithLinkage { protected readonly TypeDesc _type; internal readonly EETypeOptionalFieldsBuilder _optionalFieldsBuilder = new EETypeOptionalFieldsBuilder(); internal readonly EETypeOptionalFieldsNode _optionalFieldsNode; protected bool? _mightHaveInterfaceDispatchMap; private bool _hasConditionalDependenciesFromMetadataManager; public EETypeNode(NodeFactory factory, TypeDesc type) { if (type.IsCanonicalDefinitionType(CanonicalFormKind.Any)) Debug.Assert(this is CanonicalDefinitionEETypeNode); else if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) Debug.Assert((this is CanonicalEETypeNode) || (this is NecessaryCanonicalEETypeNode)); Debug.Assert(!type.IsRuntimeDeterminedSubtype); _type = type; _optionalFieldsNode = new EETypeOptionalFieldsNode(this); _hasConditionalDependenciesFromMetadataManager = factory.MetadataManager.HasConditionalDependenciesDueToEETypePresence(type); factory.TypeSystemContext.EnsureLoadableType(type); // We don't have a representation for function pointers right now if (WithoutParameterizeTypes(type).IsFunctionPointer) ThrowHelper.ThrowTypeLoadException(ExceptionStringID.ClassLoadGeneral, type); static TypeDesc WithoutParameterizeTypes(TypeDesc t) => t is ParameterizedType pt ?
WithoutParameterizeTypes(pt.ParameterType) : t; } protected bool MightHaveInterfaceDispatchMap(NodeFactory factory) { if (!_mightHaveInterfaceDispatchMap.HasValue) { _mightHaveInterfaceDispatchMap = EmitVirtualSlotsAndInterfaces && InterfaceDispatchMapNode.MightHaveInterfaceDispatchMap(_type, factory); } return _mightHaveInterfaceDispatchMap.Value; } protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public override bool ShouldSkipEmittingObjectNode(NodeFactory factory) { // If there is a constructed version of this node in the graph, emit that instead if (ConstructedEETypeNode.CreationAllowed(_type)) return factory.ConstructedTypeSymbol(_type).Marked; return false; } public virtual ISymbolNode NodeForLinkage(NodeFactory factory) { return factory.NecessaryTypeSymbol(_type); } public TypeDesc Type => _type; public override ObjectNodeSection Section { get { if (_type.Context.Target.IsWindows) return ObjectNodeSection.ReadOnlyDataSection; else return ObjectNodeSection.DataSection; } } public int MinimumObjectSize => _type.Context.Target.PointerSize * 3; protected virtual bool EmitVirtualSlotsAndInterfaces => false; public override bool InterestingForDynamicDependencyAnalysis { get { if (!EmitVirtualSlotsAndInterfaces) return false; if (_type.IsInterface) return false; if (_type.IsDefType) { // First, check if this type has any GVM that overrides a GVM on a parent type. If that's the case, this makes // the current type interesting for GVM analysis (i.e. instantiate its overriding GVMs for existing GVMDependenciesNodes // of the instantiated GVM on the parent types). foreach (var method in _type.GetAllVirtualMethods()) { Debug.Assert(method.IsVirtual); if (method.HasInstantiation) { MethodDesc slotDecl = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(method); if (slotDecl != method) return true; } } // Second, check if this type has any GVMs that implement any GVM on any of the implemented interfaces. This would // make the current type interesting for dynamic dependency analysis so that we can instantiate its GVMs. foreach (DefType interfaceImpl in _type.RuntimeInterfaces) { foreach (var method in interfaceImpl.GetAllVirtualMethods()) { Debug.Assert(method.IsVirtual); if (method.HasInstantiation) { // We found a GVM on one of the implemented interfaces. Find if the type implements this method. // (Note, do this comparison against the generic definition of the method, not the specific method instantiation) MethodDesc genericDefinition = method.GetMethodDefinition(); MethodDesc slotDecl = _type.ResolveInterfaceMethodTarget(genericDefinition); if (slotDecl != null) { // If the type doesn't introduce this interface method implementation (i.e. the same implementation // already exists in the base type), do not consider this type interesting for GVM analysis just yet. // // We need to limit the number of types that are interesting for GVM analysis at all costs since // these all will be looked at for every unique generic virtual method call in the program. // Having a long list of interesting types affects the compilation throughput heavily.
if (slotDecl.OwningType == _type || _type.BaseType.ResolveInterfaceMethodTarget(genericDefinition) != slotDecl) { return true; } } else { // The method could be implemented by a default interface method var resolution = _type.ResolveInterfaceMethodToDefaultImplementationOnType(genericDefinition, out slotDecl); if (resolution == DefaultInterfaceMethodResolution.DefaultImplementation) { return true; } } } } } } return false; } } internal bool HasOptionalFields { get { return _optionalFieldsBuilder.IsAtLeastOneFieldUsed(); } } internal byte[] GetOptionalFieldsData() { return _optionalFieldsBuilder.GetBytes(); } public override bool StaticDependenciesAreComputed => true; public static string GetMangledName(TypeDesc type, NameMangler nameMangler) { return nameMangler.NodeMangler.MethodTable(type); } public virtual void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.NodeMangler.MethodTable(_type)); } int ISymbolNode.Offset => 0; int ISymbolDefinitionNode.Offset => GCDescSize; public override bool IsShareable => IsTypeNodeShareable(_type); private bool CanonFormTypeMayExist { get { if (!_type.HasInstantiation) return false; if (!_type.Context.SupportsCanon) return false; // If type is already in canon form, a canonically equivalent type cannot exist if (_type.IsCanonicalSubtype(CanonicalFormKind.Any)) return false; // If we reach here, a universal canon variant can exist (if universal canon is supported) if (_type.Context.SupportsUniversalCanon) return true; // Attempt to convert to canon. If the type changes, then the CanonForm exists return (_type.ConvertToCanonForm(CanonicalFormKind.Specific) != _type); } } public sealed override bool HasConditionalStaticDependencies { get { // If the type can be converted to some interesting canon type, and this is the non-constructed variant of a MethodTable, // we may need to trigger the fully constructed type to exist to make the behavior of the type consistent // in reflection and generic template expansion scenarios if (CanonFormTypeMayExist) { return true; } if (!EmitVirtualSlotsAndInterfaces) return false; // Since the vtable is dependency driven, generate conditional static dependencies for // all possible vtable entries. // // The conditional dependencies conditionally add the implementation of the virtual method // if the virtual method is used. // // We walk the inheritance chain because abstract bases would only add a "tentative" // method body of the implementation that can be trimmed away if no other type uses it. DefType currentType = _type.GetClosestDefType(); while (currentType != null) { if (currentType == _type || (currentType is MetadataType mdType && mdType.IsAbstract)) { foreach (var method in currentType.GetAllVirtualMethods()) { // Abstract methods don't have a body associated with them, so there's no conditional // dependency to add. // Generic virtual methods are tracked by an orthogonal mechanism. if (!method.IsAbstract && !method.HasInstantiation) return true; } } currentType = currentType.BaseType; } // If the type implements at least one interface, calls against that interface could result in this type's // implementation being used.
if (_type.RuntimeInterfaces.Length > 0) return true; return _hasConditionalDependenciesFromMetadataManager; } } public sealed override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { List<CombinedDependencyListEntry> result = new List<CombinedDependencyListEntry>(); IEETypeNode maximallyConstructableType = factory.MaximallyConstructableType(_type); if (maximallyConstructableType != this) { // MethodTable upgrading from necessary to constructed if some template instantiation exists that matches up // This ensures we don't end up having two EETypes in the system (one is this necessary type, and another one // that was dynamically created at runtime). if (CanonFormTypeMayExist) { result.Add(new CombinedDependencyListEntry(maximallyConstructableType, factory.MaximallyConstructableType(_type.ConvertToCanonForm(CanonicalFormKind.Specific)), "Trigger full type generation if canonical form exists")); if (_type.Context.SupportsUniversalCanon) result.Add(new CombinedDependencyListEntry(maximallyConstructableType, factory.MaximallyConstructableType(_type.ConvertToCanonForm(CanonicalFormKind.Universal)), "Trigger full type generation if universal canonical form exists")); } return result; } if (!EmitVirtualSlotsAndInterfaces) return result; DefType defType = _type.GetClosestDefType(); // If we're producing a full vtable, none of the dependencies are conditional. if (!factory.VTable(defType).HasFixedSlots) { bool isNonInterfaceAbstractType = !defType.IsInterface && ((MetadataType)defType).IsAbstract; foreach (MethodDesc decl in defType.EnumAllVirtualSlots()) { // Generic virtual methods are tracked by an orthogonal mechanism. if (decl.HasInstantiation) continue; MethodDesc impl = defType.FindVirtualFunctionTargetMethodOnObjectType(decl); bool implOwnerIsAbstract = ((MetadataType)impl.OwningType).IsAbstract; // We add a conditional dependency in two situations: // 1. The implementation is on this type. This is pretty obvious. // 2. The implementation comes from an abstract base type. We do this // because abstract types only request a TentativeMethodEntrypoint of the implementation. // The actual method body of this entrypoint might still be trimmed away. // We don't need to do this for implementations from non-abstract bases since // non-abstract types will create a hard conditional reference to their virtual // method implementations. // // We also skip abstract methods since they don't have a body to refer to. if ((impl.OwningType == defType || implOwnerIsAbstract) && !impl.IsAbstract) { MethodDesc canonImpl = impl.GetCanonMethodTarget(CanonicalFormKind.Specific); // If this is an abstract type, only request a tentative entrypoint (whose body // might just be stubbed out). This lets us avoid generating method bodies for // virtual methods on abstract types that are overridden in all their children. // // We don't do this if the method can be placed in the sealed vtable since // those can never be overridden by children anyway. bool canUseTentativeMethod = isNonInterfaceAbstractType && !decl.CanMethodBeInSealedVTable() && factory.CompilationModuleGroup.AllowVirtualMethodOnAbstractTypeOptimization(canonImpl); IMethodNode implNode = canUseTentativeMethod ?
factory.TentativeMethodEntrypoint(canonImpl, impl.OwningType.IsValueType) : factory.MethodEntrypoint(canonImpl, impl.OwningType.IsValueType); result.Add(new CombinedDependencyListEntry(implNode, factory.VirtualMethodUse(decl), "Virtual method")); } if (impl.OwningType == defType) { factory.MetadataManager.NoteOverridingMethod(decl, impl); } } Debug.Assert( _type == defType || ((System.Collections.IStructuralEquatable)defType.RuntimeInterfaces).Equals(_type.RuntimeInterfaces, EqualityComparer<DefType>.Default)); // Add conditional dependencies for interface methods the type implements. For example, if the type T implements // interface IFoo which has a method M1, add a dependency on T.M1 dependent on IFoo.M1 being called, since it's // possible for any IFoo object to actually be an instance of T. DefType[] defTypeRuntimeInterfaces = defType.RuntimeInterfaces; for (int interfaceIndex = 0; interfaceIndex < defTypeRuntimeInterfaces.Length; interfaceIndex++) { DefType interfaceType = defTypeRuntimeInterfaces[interfaceIndex]; Debug.Assert(interfaceType.IsInterface); bool isVariantInterfaceImpl = VariantInterfaceMethodUseNode.IsVariantInterfaceImplementation(factory, _type, interfaceType); foreach (MethodDesc interfaceMethod in interfaceType.GetAllVirtualMethods()) { // Generic virtual methods are tracked by an orthogonal mechanism. if (interfaceMethod.HasInstantiation) continue; // Static virtual methods are resolved at compile time if (interfaceMethod.Signature.IsStatic) continue; MethodDesc implMethod = defType.ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod); if (implMethod != null) { result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(implMethod), factory.VirtualMethodUse(interfaceMethod), "Interface method")); // If any of the implemented interfaces have variance, calls against compatible interface methods // could result in interface methods of this type being used (e.g. IEnumerable<object>.GetEnumerator() // can dispatch to an implementation of IEnumerable<string>.GetEnumerator()). if (isVariantInterfaceImpl) { MethodDesc typicalInterfaceMethod = interfaceMethod.GetTypicalMethodDefinition(); result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(implMethod), factory.VariantInterfaceMethodUse(typicalInterfaceMethod), "Interface method")); result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(interfaceMethod), factory.VariantInterfaceMethodUse(typicalInterfaceMethod), "Interface method")); } factory.MetadataManager.NoteOverridingMethod(interfaceMethod, implMethod); } else { // Is the implementation provided by a default interface method? // If so, add a dependency on the entrypoint directly since nobody else is going to do that // (interface types have an empty vtable, modulo their generic dictionary). 
TypeDesc interfaceOnDefinition = defType.GetTypeDefinition().RuntimeInterfaces[interfaceIndex]; MethodDesc interfaceMethodDefinition = interfaceMethod; if (!interfaceType.IsTypeDefinition) interfaceMethodDefinition = factory.TypeSystemContext.GetMethodForInstantiatedType(interfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceOnDefinition); var resolution = defType.GetTypeDefinition().ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethodDefinition, out implMethod); if (resolution == DefaultInterfaceMethodResolution.DefaultImplementation) { DefType providingInterfaceDefinitionType = (DefType)implMethod.OwningType; implMethod = implMethod.InstantiateSignature(defType.Instantiation, Instantiation.Empty); MethodDesc defaultIntfMethod = implMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (defaultIntfMethod.IsCanonicalMethod(CanonicalFormKind.Any)) { defaultIntfMethod = factory.TypeSystemContext.GetDefaultInterfaceMethodImplementationThunk(defaultIntfMethod, _type.ConvertToCanonForm(CanonicalFormKind.Specific), providingInterfaceDefinitionType); } result.Add(new CombinedDependencyListEntry(factory.MethodEntrypoint(defaultIntfMethod), factory.VirtualMethodUse(interfaceMethod), "Interface method")); factory.MetadataManager.NoteOverridingMethod(interfaceMethod, implMethod); } } } } } factory.MetadataManager.GetConditionalDependenciesDueToEETypePresence(ref result, factory, _type); return result; } public static bool IsTypeNodeShareable(TypeDesc type) { return type.IsParameterizedType || type.IsFunctionPointer || type is InstantiatedType; } internal static bool MethodHasNonGenericILMethodBody(MethodDesc method) { // Generic methods have their own generic dictionaries if (method.HasInstantiation) return false; // Abstract methods don't have a body if (method.IsAbstract) return false; // PInvoke methods are not permitted on generic types, // but let's not crash the compilation because of that. if (method.IsPInvoke) return false; // CoreRT can generate method bodies for these no matter what (worst case // they'll be throwing). We don't want to take the "return false" code path on CoreRT because // delegate methods fall into the runtime implemented category on CoreRT, but we // just treat them like regular method bodies. return true; } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { DependencyList dependencies = new DependencyList(); // Include the optional fields by default. We don't know if optional fields will be needed until // all of the interface usage has been stabilized. If we end up not needing it, the MethodTable node will not // generate any relocs to it, and the optional fields node will instruct the object writer to skip // emitting it. dependencies.Add(new DependencyListEntry(_optionalFieldsNode, "Optional fields")); // TODO-SIZE: We probably don't need to add these for all EETypes StaticsInfoHashtableNode.AddStaticsInfoDependencies(ref dependencies, factory, _type); if (EmitVirtualSlotsAndInterfaces) { if (!_type.IsArrayTypeWithoutGenericInterfaces()) { // Sealed vtables have relative pointers, so to minimize size, we build sealed vtables for the canonical types dependencies.Add(new DependencyListEntry(factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)), "Sealed Vtable")); } // Also add the un-normalized vtable slices of implemented interfaces. 
// This is important to do in the scanning phase so that the compilation phase can find // vtable information for things like IEnumerator<List<__Canon>>. foreach (TypeDesc intface in _type.RuntimeInterfaces) dependencies.Add(factory.VTable(intface), "Interface vtable slice"); // Generated type contains generic virtual methods that will get added to the GVM tables if (TypeGVMEntriesNode.TypeNeedsGVMTableEntries(_type)) { dependencies.Add(new DependencyListEntry(factory.TypeGVMEntries(_type.GetTypeDefinition()), "Type with generic virtual methods")); AddDependenciesForUniversalGVMSupport(factory, _type, ref dependencies); TypeDesc canonicalType = _type.ConvertToCanonForm(CanonicalFormKind.Specific); if (canonicalType != _type) dependencies.Add(factory.ConstructedTypeSymbol(canonicalType), "Type with generic virtual methods"); } } if (factory.CompilationModuleGroup.PresenceOfEETypeImpliesAllMethodsOnType(_type)) { if (_type.IsArray || _type.IsDefType) { // If the compilation group wants this type to be fully promoted, ensure that all non-generic methods of the // type are generated. // This may be done for several reasons: // - The MethodTable may end up being COMDAT folded with other EETypes generated in a different object file // This means their generic dictionaries need to have identical contents. The only way to achieve that is // by generating the entries for all methods that contribute to the dictionary, and sorting the dictionaries. // - The generic type may be imported into another module, in which case the generic dictionary imported // must represent all of the methods, as the set of used methods cannot be known at compile time // - As a matter of policy, the type and its methods may be exported for use in another module. The policy // may wish to specify that if a type is to be placed into a shared module, all of the methods associated with // it should also be exported. foreach (var method in _type.GetClosestDefType().ConvertToCanonForm(CanonicalFormKind.Specific).GetAllMethods()) { if (!MethodHasNonGenericILMethodBody(method)) continue; dependencies.Add(factory.MethodEntrypoint(method.GetCanonMethodTarget(CanonicalFormKind.Specific)), "Ensure all methods on type due to CompilationModuleGroup policy"); } } } if (!ConstructedEETypeNode.CreationAllowed(_type)) { // If necessary MethodTable is the highest load level for this type, ask the metadata manager // if we have any dependencies due to reflectability. factory.MetadataManager.GetDependenciesDueToReflectability(ref dependencies, factory, _type); // If necessary MethodTable is the highest load level, consider this a module use if (_type is MetadataType mdType && mdType.Module.GetGlobalModuleType().GetStaticConstructor() is MethodDesc moduleCctor) { dependencies.Add(factory.MethodEntrypoint(moduleCctor), "Type in a module with initializer"); } } return dependencies; } public override ObjectData GetData(NodeFactory factory, bool relocsOnly) { ObjectDataBuilder objData = new ObjectDataBuilder(factory, relocsOnly); objData.RequireInitialPointerAlignment(); objData.AddSymbol(this); ComputeOptionalEETypeFields(factory, relocsOnly); OutputGCDesc(ref objData); OutputComponentSize(ref objData); OutputFlags(factory, ref objData); objData.EmitInt(BaseSize); OutputRelatedType(factory, ref objData); // Number of vtable slots will only be known later. Reserve the bytes for it. var vtableSlotCountReservation = objData.ReserveShort(); // Number of interfaces will only be known later. Reserve the bytes for it.
var interfaceCountReservation = objData.ReserveShort(); objData.EmitInt(_type.GetHashCode()); if (EmitVirtualSlotsAndInterfaces) { // Emit VTable Debug.Assert(objData.CountBytes - ((ISymbolDefinitionNode)this).Offset == GetVTableOffset(objData.TargetPointerSize)); SlotCounter virtualSlotCounter = SlotCounter.BeginCounting(ref /* readonly */ objData); OutputVirtualSlots(factory, ref objData, _type, _type, _type, relocsOnly); // Update slot count int numberOfVtableSlots = virtualSlotCounter.CountSlots(ref /* readonly */ objData); objData.EmitShort(vtableSlotCountReservation, checked((short)numberOfVtableSlots)); // Emit interface map SlotCounter interfaceSlotCounter = SlotCounter.BeginCounting(ref /* readonly */ objData); OutputInterfaceMap(factory, ref objData); // Update slot count int numberOfInterfaceSlots = interfaceSlotCounter.CountSlots(ref /* readonly */ objData); objData.EmitShort(interfaceCountReservation, checked((short)numberOfInterfaceSlots)); } else { // If we're not emitting any slots, the number of slots is zero. objData.EmitShort(vtableSlotCountReservation, 0); objData.EmitShort(interfaceCountReservation, 0); } OutputTypeManagerIndirection(factory, ref objData); OutputWritableData(factory, ref objData); OutputFinalizerMethod(factory, ref objData); OutputOptionalFields(factory, ref objData); OutputSealedVTable(factory, relocsOnly, ref objData); OutputGenericInstantiationDetails(factory, ref objData); return objData.ToObjectData(); } /// <summary> /// Returns the offset within a MethodTable of the beginning of VTable entries /// </summary> /// <param name="pointerSize">The size of a pointer in bytes in the target architecture</param> public static int GetVTableOffset(int pointerSize) { return 16 + pointerSize; } protected virtual int GCDescSize => 0; protected virtual void OutputGCDesc(ref ObjectDataBuilder builder) { // Non-constructed EETypeNodes get no GC Desc Debug.Assert(GCDescSize == 0); } private void OutputComponentSize(ref ObjectDataBuilder objData) { if (_type.IsArray) { TypeDesc elementType = ((ArrayType)_type).ElementType; if (elementType == elementType.Context.UniversalCanonType) { objData.EmitShort(0); } else { int elementSize = elementType.GetElementSize().AsInt; // We validated that this will fit the short when the node was constructed. No need for nice messages. objData.EmitShort((short)checked((ushort)elementSize)); } } else if (_type.IsString) { objData.EmitShort(StringComponentSize.Value); } else { objData.EmitShort(0); } } private void OutputFlags(NodeFactory factory, ref ObjectDataBuilder objData) { UInt16 flags = EETypeBuilderHelpers.ComputeFlags(_type); if (_type.GetTypeDefinition() == factory.ArrayOfTEnumeratorType) { // Generic array enumerators use special variance rules recognized by the runtime flags |= (UInt16)EETypeFlags.GenericVarianceFlag; } if (factory.TypeSystemContext.IsGenericArrayInterfaceType(_type)) { // Runtime casting logic relies on all interface types implemented on arrays // to have the variant flag set (even if all the arguments are non-variant). // This supports e.g.
casting uint[] to ICollection<int> flags |= (UInt16)EETypeFlags.GenericVarianceFlag; } if (_type.IsIDynamicInterfaceCastable) { flags |= (UInt16)EETypeFlags.IDynamicInterfaceCastableFlag; } ISymbolNode relatedTypeNode = GetRelatedTypeNode(factory); // If the related type (base type / array element type / pointee type) is not part of this compilation group, and // the output binaries will be multi-file (not multiple object files linked together), indicate to the runtime // that it should indirect through the import address table if (relatedTypeNode != null && relatedTypeNode.RepresentsIndirectionCell) { flags |= (UInt16)EETypeFlags.RelatedTypeViaIATFlag; } if (HasOptionalFields) { flags |= (UInt16)EETypeFlags.OptionalFieldsFlag; } if (this is ClonedConstructedEETypeNode) { flags |= (UInt16)EETypeKind.ClonedEEType; } objData.EmitShort((short)flags); } protected virtual int BaseSize { get { int pointerSize = _type.Context.Target.PointerSize; int objectSize; if (_type.IsDefType) { LayoutInt instanceByteCount = ((DefType)_type).InstanceByteCount; if (instanceByteCount.IsIndeterminate) { // Some value must be put in, but the specific value doesn't matter as it // isn't used for specific instantiations, and the universal canon MethodTable // is never associated with an allocated object. objectSize = pointerSize; } else { objectSize = pointerSize + ((DefType)_type).InstanceByteCount.AsInt; // +pointerSize for SyncBlock } if (_type.IsValueType) objectSize += pointerSize; // + EETypePtr field inherited from System.Object } else if (_type.IsArray) { objectSize = 3 * pointerSize; // SyncBlock + EETypePtr + Length if (_type.IsMdArray) objectSize += 2 * sizeof(int) * ((ArrayType)_type).Rank; } else if (_type.IsPointer) { // These never get boxed and don't have a base size. Use a sentinel value recognized by the runtime. return ParameterizedTypeShapeConstants.Pointer; } else if (_type.IsByRef) { // These never get boxed and don't have a base size. Use a sentinel value recognized by the runtime. return ParameterizedTypeShapeConstants.ByRef; } else throw new NotImplementedException(); objectSize = AlignmentHelper.AlignUp(objectSize, pointerSize); objectSize = Math.Max(MinimumObjectSize, objectSize); if (_type.IsString) { // If this is a string, throw away objectSize we computed so far. Strings are special. // SyncBlock + EETypePtr + length + firstChar objectSize = 2 * pointerSize + sizeof(int) + StringComponentSize.Value; } return objectSize; } } protected virtual ISymbolNode GetBaseTypeNode(NodeFactory factory) { return _type.BaseType != null ? 
factory.NecessaryTypeSymbol(_type.BaseType) : null; } private ISymbolNode GetRelatedTypeNode(NodeFactory factory) { ISymbolNode relatedTypeNode = null; if (_type.IsArray || _type.IsPointer || _type.IsByRef) { var parameterType = ((ParameterizedType)_type).ParameterType; relatedTypeNode = factory.NecessaryTypeSymbol(parameterType); } else { TypeDesc baseType = _type.BaseType; if (baseType != null) { relatedTypeNode = GetBaseTypeNode(factory); } } return relatedTypeNode; } protected virtual void OutputRelatedType(NodeFactory factory, ref ObjectDataBuilder objData) { ISymbolNode relatedTypeNode = GetRelatedTypeNode(factory); if (relatedTypeNode != null) { objData.EmitPointerReloc(relatedTypeNode); } else { objData.EmitZeroPointer(); } } private void OutputVirtualSlots(NodeFactory factory, ref ObjectDataBuilder objData, TypeDesc implType, TypeDesc declType, TypeDesc templateType, bool relocsOnly) { Debug.Assert(EmitVirtualSlotsAndInterfaces); declType = declType.GetClosestDefType(); templateType = templateType.ConvertToCanonForm(CanonicalFormKind.Specific); var baseType = declType.BaseType; if (baseType != null) { Debug.Assert(templateType.BaseType != null); OutputVirtualSlots(factory, ref objData, implType, baseType, templateType.BaseType, relocsOnly); } // // In the universal canonical types case, we could have base types in the hierarchy that are partial universal canonical types. // The presence of these types could cause incorrect vtable layouts, so we need to fully canonicalize them and walk the // hierarchy of the template type of the original input type to detect these cases. // // Example: we begin with Derived<__UniversalCanon> and walk the template hierarchy: // // class Derived<T> : Middle<T, MyStruct> { } // -> Template is Derived<__UniversalCanon> and needs a dictionary slot // // -> Basetype template is Middle<__UniversalCanon, MyStruct>. It's a partial // Universal canonical type, so we need to fully canonicalize it. // // class Middle<T, U> : Base<U> { } // -> Template is Middle<__UniversalCanon, __UniversalCanon> and needs a dictionary slot // // -> Basetype template is Base<__UniversalCanon> // // class Base<T> { } // -> Template is Base<__UniversalCanon> and needs a dictionary slot. // // If we had not fully canonicalized the Middle class template, we would have ended up with Base<MyStruct>, which does not need // a dictionary slot, meaning we would have created a vtable layout that the runtime does not expect. // // The generic dictionary pointer occupies the first slot of each type vtable slice if (declType.HasGenericDictionarySlot() || templateType.HasGenericDictionarySlot()) { // All generic interface types have a dictionary slot, but only some of them have an actual dictionary.
bool isInterfaceWithAnEmptySlot = declType.IsInterface && declType.ConvertToCanonForm(CanonicalFormKind.Specific) == declType; // Note: Canonical type instantiations always have a generic dictionary vtable slot, but it's empty // Note: If the current EETypeNode represents a universal canonical type, any dictionary slot must be empty if (declType.IsCanonicalSubtype(CanonicalFormKind.Any) || implType.IsCanonicalSubtype(CanonicalFormKind.Universal) || factory.LazyGenericsPolicy.UsesLazyGenerics(declType) || isInterfaceWithAnEmptySlot) objData.EmitZeroPointer(); else objData.EmitPointerReloc(factory.TypeGenericDictionary(declType)); } VTableSliceNode declVTable = factory.VTable(declType); // It's only okay to touch the actual list of slots if we're in the final emission phase // or the vtable is not built lazily. if (relocsOnly && !declVTable.HasFixedSlots) return; // Interface types don't place anything else in their physical vtable. // Interfaces have logical slots for their methods but since they're all abstract, they would be zero. // We place default implementations of interface methods into the vtable of the interface-implementing // type, pretending there was an extra virtual slot. if (_type.IsInterface) return; // Actual vtable slots follow IReadOnlyList<MethodDesc> virtualSlots = declVTable.Slots; for (int i = 0; i < virtualSlots.Count; i++) { MethodDesc declMethod = virtualSlots[i]; // Object.Finalize shouldn't get a virtual slot. Finalizer is stored in an optional field // instead: most MethodTables don't have a finalizer, but all EETypes contain Object's vtable. // This lets us save a pointer (+reloc) on most EETypes. Debug.Assert(!declType.IsObject || declMethod.Name != "Finalize"); // No generic virtual methods can appear in the vtable! Debug.Assert(!declMethod.HasInstantiation); MethodDesc implMethod = implType.GetClosestDefType().FindVirtualFunctionTargetMethodOnObjectType(declMethod); // Final NewSlot methods cannot be overridden, and therefore can be placed in the sealed-vtable to reduce the size of the vtable // of this type and any type that inherits from it. if (declMethod.CanMethodBeInSealedVTable() && !declType.IsArrayTypeWithoutGenericInterfaces()) continue; if (!implMethod.IsAbstract) { MethodDesc canonImplMethod = implMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // If the type we're generating now is abstract, and the implementation comes from an abstract type, // only use a tentative method entrypoint that can have its body replaced by a throwing stub // if no "hard" reference to that entrypoint exists in the program. // This helps us to eliminate method bodies for virtual methods on abstract types that are fully overridden // in the children of that abstract type. bool canUseTentativeEntrypoint = implType is MetadataType mdImplType && mdImplType.IsAbstract && !mdImplType.IsInterface && implMethod.OwningType is MetadataType mdImplMethodType && mdImplMethodType.IsAbstract && factory.CompilationModuleGroup.AllowVirtualMethodOnAbstractTypeOptimization(canonImplMethod); IMethodNode implSymbol = canUseTentativeEntrypoint ?
factory.TentativeMethodEntrypoint(canonImplMethod, implMethod.OwningType.IsValueType) : factory.MethodEntrypoint(canonImplMethod, implMethod.OwningType.IsValueType); objData.EmitPointerReloc(implSymbol); } else { objData.EmitZeroPointer(); } } } protected virtual IEETypeNode GetInterfaceTypeNode(NodeFactory factory, TypeDesc interfaceType) { return factory.NecessaryTypeSymbol(interfaceType); } protected virtual void OutputInterfaceMap(NodeFactory factory, ref ObjectDataBuilder objData) { Debug.Assert(EmitVirtualSlotsAndInterfaces); foreach (var itf in _type.RuntimeInterfaces) { objData.EmitPointerRelocOrIndirectionReference(GetInterfaceTypeNode(factory, itf)); } } private void OutputFinalizerMethod(NodeFactory factory, ref ObjectDataBuilder objData) { if (_type.HasFinalizer) { MethodDesc finalizerMethod = _type.GetFinalizer(); MethodDesc canonFinalizerMethod = finalizerMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (factory.Target.SupportsRelativePointers) objData.EmitReloc(factory.MethodEntrypoint(canonFinalizerMethod), RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(factory.MethodEntrypoint(canonFinalizerMethod)); } } protected void OutputTypeManagerIndirection(NodeFactory factory, ref ObjectDataBuilder objData) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(factory.TypeManagerIndirection, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(factory.TypeManagerIndirection); } protected void OutputWritableData(NodeFactory factory, ref ObjectDataBuilder objData) { if (factory.Target.SupportsRelativePointers) { Utf8StringBuilder writableDataBlobName = new Utf8StringBuilder(); writableDataBlobName.Append("__writableData"); writableDataBlobName.Append(factory.NameMangler.GetMangledTypeName(_type)); BlobNode blob = factory.UninitializedWritableDataBlob(writableDataBlobName.ToUtf8String(), WritableData.GetSize(factory.Target.PointerSize), WritableData.GetAlignment(factory.Target.PointerSize)); objData.EmitReloc(blob, RelocType.IMAGE_REL_BASED_RELPTR32); } } protected void OutputOptionalFields(NodeFactory factory, ref ObjectDataBuilder objData) { if (HasOptionalFields) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(_optionalFieldsNode, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(_optionalFieldsNode); } } private void OutputSealedVTable(NodeFactory factory, bool relocsOnly, ref ObjectDataBuilder objData) { if (EmitVirtualSlotsAndInterfaces && !_type.IsArrayTypeWithoutGenericInterfaces()) { // Sealed vtables have relative pointers, so to minimize size, we build sealed vtables for the canonical types SealedVTableNode sealedVTable = factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)); if (sealedVTable.BuildSealedVTableSlots(factory, relocsOnly) && sealedVTable.NumSealedVTableEntries > 0) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(sealedVTable, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(sealedVTable); } } } private void OutputGenericInstantiationDetails(NodeFactory factory, ref ObjectDataBuilder objData) { if (_type.HasInstantiation && !_type.IsTypeDefinition) { IEETypeNode typeDefNode = factory.NecessaryTypeSymbol(_type.GetTypeDefinition()); if (factory.Target.SupportsRelativePointers) objData.EmitRelativeRelocOrIndirectionReference(typeDefNode); else objData.EmitPointerRelocOrIndirectionReference(typeDefNode); GenericCompositionDetails details; if (_type.GetTypeDefinition() == factory.ArrayOfTEnumeratorType) { // Generic array 
enumerators use special variance rules recognized by the runtime details = new GenericCompositionDetails(_type.Instantiation, new[] { GenericVariance.ArrayCovariant }); } else if (factory.TypeSystemContext.IsGenericArrayInterfaceType(_type)) { // Runtime casting logic relies on all interface types implemented on arrays // to have the variant flag set (even if all the arguments are non-variant). // This supports e.g. casting uint[] to ICollection<int> details = new GenericCompositionDetails(_type, forceVarianceInfo: true); } else details = new GenericCompositionDetails(_type); ISymbolNode compositionNode = factory.GenericComposition(details); if (factory.Target.SupportsRelativePointers) objData.EmitReloc(compositionNode, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(compositionNode); } } /// <summary> /// Populate the OptionalFieldsRuntimeBuilder if any optional fields are required. /// </summary> protected internal virtual void ComputeOptionalEETypeFields(NodeFactory factory, bool relocsOnly) { if (!relocsOnly && MightHaveInterfaceDispatchMap(factory)) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.DispatchMap, checked((uint)factory.InterfaceDispatchMapIndirection(Type).IndexFromBeginningOfArray)); } ComputeRareFlags(factory, relocsOnly); ComputeNullableValueOffset(); ComputeValueTypeFieldPadding(); } void ComputeRareFlags(NodeFactory factory, bool relocsOnly) { uint flags = 0; MetadataType metadataType = _type as MetadataType; if (factory.PreinitializationManager.HasLazyStaticConstructor(_type)) { flags |= (uint)EETypeRareFlags.HasCctorFlag; } if (_type.RequiresAlign8()) { flags |= (uint)EETypeRareFlags.RequiresAlign8Flag; } TargetArchitecture targetArch = _type.Context.Target.Architecture; if (metadataType != null && (targetArch == TargetArchitecture.ARM || targetArch == TargetArchitecture.ARM64) && metadataType.IsHomogeneousAggregate) { flags |= (uint)EETypeRareFlags.IsHFAFlag; } if (metadataType != null && !_type.IsInterface && metadataType.IsAbstract) { flags |= (uint)EETypeRareFlags.IsAbstractClassFlag; } if (_type.IsByRefLike) { flags |= (uint)EETypeRareFlags.IsByRefLikeFlag; } if (EmitVirtualSlotsAndInterfaces && !_type.IsArrayTypeWithoutGenericInterfaces()) { SealedVTableNode sealedVTable = factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)); if (sealedVTable.BuildSealedVTableSlots(factory, relocsOnly) && sealedVTable.NumSealedVTableEntries > 0) flags |= (uint)EETypeRareFlags.HasSealedVTableEntriesFlag; } if (flags != 0) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.RareFlags, flags); } } /// <summary> /// To support boxing / unboxing, the offset of the value field of a Nullable type is recorded on the MethodTable. /// This is variable according to the alignment requirements of the Nullable&lt;T&gt; type parameter. /// </summary> void ComputeNullableValueOffset() { if (!_type.IsNullable) return; if (!_type.Instantiation[0].IsCanonicalSubtype(CanonicalFormKind.Universal)) { var field = _type.GetKnownField("value"); // In the definition of Nullable<T>, the first field should be the boolean representing "hasValue" Debug.Assert(field.Offset.AsInt > 0); // The contract with the runtime states the Nullable value offset is stored with the boolean "hasValue" size subtracted // to get a small encoding size win. 
_optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.NullableValueOffset, (uint)field.Offset.AsInt - 1); } } protected virtual void ComputeValueTypeFieldPadding() { // All types that can be derived from, and whose instance size is therefore observable to derived types, compute ValueTypeFieldPadding. // Unfortunately, the name ValueTypeFieldPadding is now a misnomer; it is kept to avoid integration conflicts. // Interfaces, sealed types, and non-DefTypes cannot be derived from if (_type.IsInterface || !_type.IsDefType || (_type.IsSealed() && !_type.IsValueType)) return; DefType defType = _type as DefType; Debug.Assert(defType != null); uint valueTypeFieldPaddingEncoded; if (defType.InstanceByteCount.IsIndeterminate) { valueTypeFieldPaddingEncoded = EETypeBuilderHelpers.ComputeValueTypeFieldPaddingFieldValue(0, 1, _type.Context.Target.PointerSize); } else { int numInstanceFieldBytes = defType.InstanceByteCountUnaligned.AsInt; // Check if we have a type derived from System.ValueType or System.Enum, but not System.Enum itself if (defType.IsValueType) { // Value types should have at least 1 byte of size Debug.Assert(numInstanceFieldBytes >= 1); // The size doesn't currently include the MethodTable pointer size. We need to add this so that // the number of instance field bytes consistently represents the boxed size. numInstanceFieldBytes += _type.Context.Target.PointerSize; } // For unboxing to work correctly and for supporting dynamic type loading for derived types we need // to record the actual size of the fields of a type without any padding for GC heap allocation (since // we can unbox into locals or arrays where this padding is not used, and because field layout for derived // types is affected by the unaligned base size). We don't want to store this information for all EETypes // since it's only relevant for value types and derivable types, so it's added as an optional field. It's // also enough to simply store the size of the padding (between 0 and 4 or 8 bytes for 32-bit and 0 and 8 or 16 bytes // for 64-bit) which cuts down our storage requirements. uint valueTypeFieldPadding = checked((uint)((BaseSize - _type.Context.Target.PointerSize) - numInstanceFieldBytes)); valueTypeFieldPaddingEncoded = EETypeBuilderHelpers.ComputeValueTypeFieldPaddingFieldValue(valueTypeFieldPadding, (uint)defType.InstanceFieldAlignment.AsInt, _type.Context.Target.PointerSize); } if (valueTypeFieldPaddingEncoded != 0) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.ValueTypeFieldPadding, valueTypeFieldPaddingEncoded); } } protected override void OnMarked(NodeFactory context) { if (!context.IsCppCodegenTemporaryWorkaround) { Debug.Assert(_type.IsTypeDefinition || !_type.HasSameTypeDefinition(context.ArrayOfTClass), "Asking for Array<T> MethodTable"); } } public static void AddDependenciesForStaticsNode(NodeFactory factory, TypeDesc type, ref DependencyList dependencies) { // To ensure that the behavior of FieldInfo.GetValue/SetValue remains correct, // if a type may be reflectable, and it is generic, and a canonical instantiation used for reflection // can exist which can refer to the associated type of this static base, ensure that type // has a MethodTable (which will allow the static field lookup logic to find the right type). if (type.HasInstantiation && !factory.MetadataManager.IsReflectionBlocked(type)) { // TODO-SIZE: This current implementation is slightly generous, as it does not attempt to restrict // the created types to the maximum extent by investigating reflection data and such.
Here we just // check if we support use of a canonically equivalent type to perform reflection. // We don't check to see if reflection is enabled on the type. if (factory.TypeSystemContext.SupportsUniversalCanon || (factory.TypeSystemContext.SupportsCanon && (type != type.ConvertToCanonForm(CanonicalFormKind.Specific)))) { if (dependencies == null) dependencies = new DependencyList(); dependencies.Add(factory.NecessaryTypeSymbol(type), "Static block owning type is necessary for canonically equivalent reflection"); } } } protected static void AddDependenciesForUniversalGVMSupport(NodeFactory factory, TypeDesc type, ref DependencyList dependencies) { if (factory.TypeSystemContext.SupportsUniversalCanon) { foreach (MethodDesc method in type.GetVirtualMethods()) { if (!method.HasInstantiation) continue; if (method.IsAbstract) continue; TypeDesc[] universalCanonArray = new TypeDesc[method.Instantiation.Length]; for (int i = 0; i < universalCanonArray.Length; i++) universalCanonArray[i] = factory.TypeSystemContext.UniversalCanonType; MethodDesc universalCanonMethodNonCanonicalized = method.MakeInstantiatedMethod(new Instantiation(universalCanonArray)); MethodDesc universalCanonGVMMethod = universalCanonMethodNonCanonicalized.GetCanonMethodTarget(CanonicalFormKind.Universal); if (dependencies == null) dependencies = new DependencyList(); dependencies.Add(new DependencyListEntry(factory.MethodEntrypoint(universalCanonGVMMethod), "USG GVM Method")); } } } public override int ClassCode => 1521789141; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { return comparer.Compare(_type, ((EETypeNode)other)._type); } public override string ToString() { return _type.ToString(); } private struct SlotCounter { private int _startBytes; public static SlotCounter BeginCounting(ref /* readonly */ ObjectDataBuilder builder) => new SlotCounter { _startBytes = builder.CountBytes }; public int CountSlots(ref /* readonly */ ObjectDataBuilder builder) { int bytesEmitted = builder.CountBytes - _startBytes; Debug.Assert(bytesEmitted % builder.TargetPointerSize == 0); return bytesEmitted / builder.TargetPointerSize; } } } }
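The Nullable<T> value-offset encoding implemented in ComputeNullableValueOffset above stores the offset of the "value" field minus the size of the leading "hasValue" boolean. A minimal self-contained sketch of that encoding follows; the field offset used is hypothetical and the code is illustrative only, not the compiler's.

// Sketch of the Nullable<T> value-offset encoding described above. The runtime
// contract records (offset of the "value" field - 1), the 1 being the one-byte
// "hasValue" boolean that starts every Nullable<T>. Offsets are hypothetical.
using System;

static class NullableOffsetSketch
{
    static uint Encode(int valueFieldOffset)
    {
        // Mirrors the "(uint)field.Offset.AsInt - 1" computation above.
        return checked((uint)valueFieldOffset - 1);
    }

    static void Main()
    {
        // Hypothetical layout: in Nullable<long> the "value" field sits at
        // offset 8, so the optional field would record 7.
        Console.WriteLine(Encode(8)); // prints 7
    }
}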
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Internal.IL; using Internal.Runtime; using Internal.Text; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; using GenericVariance = Internal.Runtime.GenericVariance; namespace ILCompiler.DependencyAnalysis { /// <summary> /// Given a type, EETypeNode writes a MethodTable data structure in the format expected by the runtime. /// /// Format of a MethodTable: /// /// Field Size | Contents /// ----------------+----------------------------------- /// UInt16 | Component Size. For arrays this is the element type size, for strings it is 2 (.NET uses /// | UTF16 character encoding), for generic type definitions it is the number of generic parameters, /// | and 0 for all other types. /// | /// UInt16 | EETypeKind (Normal, Array, Pointer type). Flags for: IsValueType, IsCrossModule, HasPointers, /// | HasOptionalFields, IsInterface, IsGeneric. Top 5 bits are used for enum EETypeElementType to /// | record whether it's backed by an Int32, Int16, etc. /// | /// UInt32 | Base size. /// | /// [Pointer Size] | Related type. Base type for regular types. Element type for arrays / pointer types. /// | /// UInt16 | Number of VTable slots (X) /// | /// UInt16 | Number of interfaces implemented by type (Y) /// | /// UInt32 | Hash code /// | /// X * [Ptr Size] | VTable entries (optional) /// | /// Y * [Ptr Size] | Pointers to interface map data structures (optional) /// | /// [Relative ptr] | Pointer to containing TypeManager indirection cell /// | /// [Relative ptr] | Pointer to writable data /// | /// [Relative ptr] | Pointer to finalizer method (optional) /// | /// [Relative ptr] | Pointer to optional fields (optional) /// | /// [Relative ptr] | Pointer to the generic type definition MethodTable (optional) /// | /// [Relative ptr] | Pointer to the generic argument and variance info (optional) /// </summary> public partial class EETypeNode : ObjectNode, IEETypeNode, ISymbolDefinitionNode, ISymbolNodeWithLinkage { protected readonly TypeDesc _type; internal readonly EETypeOptionalFieldsBuilder _optionalFieldsBuilder = new EETypeOptionalFieldsBuilder(); internal readonly EETypeOptionalFieldsNode _optionalFieldsNode; protected bool? _mightHaveInterfaceDispatchMap; private bool _hasConditionalDependenciesFromMetadataManager; public EETypeNode(NodeFactory factory, TypeDesc type) { if (type.IsCanonicalDefinitionType(CanonicalFormKind.Any)) Debug.Assert(this is CanonicalDefinitionEETypeNode); else if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) Debug.Assert((this is CanonicalEETypeNode) || (this is NecessaryCanonicalEETypeNode)); Debug.Assert(!type.IsRuntimeDeterminedSubtype); _type = type; _optionalFieldsNode = new EETypeOptionalFieldsNode(this); _hasConditionalDependenciesFromMetadataManager = factory.MetadataManager.HasConditionalDependenciesDueToEETypePresence(type); factory.TypeSystemContext.EnsureLoadableType(type); // We don't have a representation for function pointers right now if (WithoutParameterizeTypes(type).IsFunctionPointer) ThrowHelper.ThrowTypeLoadException(ExceptionStringID.ClassLoadGeneral, type); static TypeDesc WithoutParameterizeTypes(TypeDesc t) => t is ParameterizedType pt ?
WithoutParameterizeTypes(pt.ParameterType) : t; } protected bool MightHaveInterfaceDispatchMap(NodeFactory factory) { if (!_mightHaveInterfaceDispatchMap.HasValue) { _mightHaveInterfaceDispatchMap = EmitVirtualSlotsAndInterfaces && InterfaceDispatchMapNode.MightHaveInterfaceDispatchMap(_type, factory); } return _mightHaveInterfaceDispatchMap.Value; } protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public override bool ShouldSkipEmittingObjectNode(NodeFactory factory) { // If there is a constructed version of this node in the graph, emit that instead if (ConstructedEETypeNode.CreationAllowed(_type)) return factory.ConstructedTypeSymbol(_type).Marked; return false; } public virtual ISymbolNode NodeForLinkage(NodeFactory factory) { return factory.NecessaryTypeSymbol(_type); } public TypeDesc Type => _type; public override ObjectNodeSection Section { get { if (_type.Context.Target.IsWindows) return ObjectNodeSection.ReadOnlyDataSection; else return ObjectNodeSection.DataSection; } } public int MinimumObjectSize => _type.Context.Target.PointerSize * 3; protected virtual bool EmitVirtualSlotsAndInterfaces => false; public override bool InterestingForDynamicDependencyAnalysis { get { if (!EmitVirtualSlotsAndInterfaces) return false; if (_type.IsInterface) return false; if (_type.IsDefType) { // First, check if this type has any GVM that overrides a GVM on a parent type. If that's the case, this makes // the current type interesting for GVM analysis (i.e. instantiate its overriding GVMs for existing GVMDependenciesNodes // of the instantiated GVM on the parent types). foreach (var method in _type.GetAllVirtualMethods()) { Debug.Assert(method.IsVirtual); if (method.HasInstantiation) { MethodDesc slotDecl = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(method); if (slotDecl != method) return true; } } // Second, check if this type has any GVMs that implement any GVM on any of the implemented interfaces. This would // make the current type interesting for dynamic dependency analysis so that we can instantiate its GVMs. foreach (DefType interfaceImpl in _type.RuntimeInterfaces) { foreach (var method in interfaceImpl.GetAllVirtualMethods()) { Debug.Assert(method.IsVirtual); // Static interface methods don't participate in GVM analysis if (method.Signature.IsStatic) continue; if (method.HasInstantiation) { // We found a GVM on one of the implemented interfaces. Find if the type implements this method. // (Note: do this comparison against the generic definition of the method, not the specific method instantiation.) MethodDesc genericDefinition = method.GetMethodDefinition(); MethodDesc slotDecl = _type.ResolveInterfaceMethodTarget(genericDefinition); if (slotDecl != null) { // If the type doesn't introduce this interface method implementation (i.e. the same implementation // already exists in the base type), do not consider this type interesting for GVM analysis just yet. // // We need to limit the number of types that are interesting for GVM analysis at all costs since // these all will be looked at for every unique generic virtual method call in the program. // Having a long list of interesting types affects the compilation throughput heavily.
if (slotDecl.OwningType == _type || _type.BaseType.ResolveInterfaceMethodTarget(genericDefinition) != slotDecl) { return true; } } else { // The method could be implemented by a default interface method var resolution = _type.ResolveInterfaceMethodToDefaultImplementationOnType(genericDefinition, out slotDecl); if (resolution == DefaultInterfaceMethodResolution.DefaultImplementation) { return true; } } } } } } return false; } } internal bool HasOptionalFields { get { return _optionalFieldsBuilder.IsAtLeastOneFieldUsed(); } } internal byte[] GetOptionalFieldsData() { return _optionalFieldsBuilder.GetBytes(); } public override bool StaticDependenciesAreComputed => true; public static string GetMangledName(TypeDesc type, NameMangler nameMangler) { return nameMangler.NodeMangler.MethodTable(type); } public virtual void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.NodeMangler.MethodTable(_type)); } int ISymbolNode.Offset => 0; int ISymbolDefinitionNode.Offset => GCDescSize; public override bool IsShareable => IsTypeNodeShareable(_type); private bool CanonFormTypeMayExist { get { if (!_type.HasInstantiation) return false; if (!_type.Context.SupportsCanon) return false; // If type is already in canon form, a canonically equivalent type cannot exist if (_type.IsCanonicalSubtype(CanonicalFormKind.Any)) return false; // If we reach here, a universal canon variant can exist (if universal canon is supported) if (_type.Context.SupportsUniversalCanon) return true; // Attempt to convert to canon. If the type changes, then the CanonForm exists return (_type.ConvertToCanonForm(CanonicalFormKind.Specific) != _type); } } public sealed override bool HasConditionalStaticDependencies { get { // If the type can be converted to some interesting canon type, and this is the non-constructed variant of a MethodTable, // we may need to trigger the fully constructed type to exist to make the behavior of the type consistent // in reflection and generic template expansion scenarios if (CanonFormTypeMayExist) { return true; } if (!EmitVirtualSlotsAndInterfaces) return false; // Since the vtable is dependency driven, generate conditional static dependencies for // all possible vtable entries. // // The conditional dependencies conditionally add the implementation of the virtual method // if the virtual method is used. // // We walk the inheritance chain because abstract bases would only add a "tentative" // method body of the implementation that can be trimmed away if no other type uses it. DefType currentType = _type.GetClosestDefType(); while (currentType != null) { if (currentType == _type || (currentType is MetadataType mdType && mdType.IsAbstract)) { foreach (var method in currentType.GetAllVirtualMethods()) { // Abstract methods don't have a body associated with them so there's no conditional // dependency to add. // Generic virtual methods are tracked by an orthogonal mechanism. if (!method.IsAbstract && !method.HasInstantiation) return true; } } currentType = currentType.BaseType; } // If the type implements at least one interface, calls against that interface could result in this type's // implementation being used.
if (_type.RuntimeInterfaces.Length > 0) return true; return _hasConditionalDependenciesFromMetadataManager; } } public sealed override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { List<CombinedDependencyListEntry> result = new List<CombinedDependencyListEntry>(); IEETypeNode maximallyConstructableType = factory.MaximallyConstructableType(_type); if (maximallyConstructableType != this) { // MethodTable upgrading from necessary to constructed if some template instantiation exists that matches up // This ensures we don't end up having two EETypes in the system (one is this necessary type, and another one // that was dynamically created at runtime). if (CanonFormTypeMayExist) { result.Add(new CombinedDependencyListEntry(maximallyConstructableType, factory.MaximallyConstructableType(_type.ConvertToCanonForm(CanonicalFormKind.Specific)), "Trigger full type generation if canonical form exists")); if (_type.Context.SupportsUniversalCanon) result.Add(new CombinedDependencyListEntry(maximallyConstructableType, factory.MaximallyConstructableType(_type.ConvertToCanonForm(CanonicalFormKind.Universal)), "Trigger full type generation if universal canonical form exists")); } return result; } if (!EmitVirtualSlotsAndInterfaces) return result; DefType defType = _type.GetClosestDefType(); // If we're producing a full vtable, none of the dependencies are conditional. if (!factory.VTable(defType).HasFixedSlots) { bool isNonInterfaceAbstractType = !defType.IsInterface && ((MetadataType)defType).IsAbstract; foreach (MethodDesc decl in defType.EnumAllVirtualSlots()) { // Generic virtual methods are tracked by an orthogonal mechanism. if (decl.HasInstantiation) continue; MethodDesc impl = defType.FindVirtualFunctionTargetMethodOnObjectType(decl); bool implOwnerIsAbstract = ((MetadataType)impl.OwningType).IsAbstract; // We add a conditional dependency in two situations: // 1. The implementation is on this type. This is pretty obvious. // 2. The implementation comes from an abstract base type. We do this // because abstract types only request a TentativeMethodEntrypoint of the implementation. // The actual method body of this entrypoint might still be trimmed away. // We don't need to do this for implementations from non-abstract bases since // non-abstract types will create a hard conditional reference to their virtual // method implementations. // // We also skip abstract methods since they don't have a body to refer to. if ((impl.OwningType == defType || implOwnerIsAbstract) && !impl.IsAbstract) { MethodDesc canonImpl = impl.GetCanonMethodTarget(CanonicalFormKind.Specific); // If this is an abstract type, only request a tentative entrypoint (whose body // might just be stubbed out). This lets us avoid generating method bodies for // virtual methods on abstract types that are overridden in all their children. // // We don't do this if the method can be placed in the sealed vtable since // those can never be overridden by children anyway. bool canUseTentativeMethod = isNonInterfaceAbstractType && !decl.CanMethodBeInSealedVTable() && factory.CompilationModuleGroup.AllowVirtualMethodOnAbstractTypeOptimization(canonImpl); IMethodNode implNode = canUseTentativeMethod ?
factory.TentativeMethodEntrypoint(canonImpl, impl.OwningType.IsValueType) : factory.MethodEntrypoint(canonImpl, impl.OwningType.IsValueType); result.Add(new CombinedDependencyListEntry(implNode, factory.VirtualMethodUse(decl), "Virtual method")); } if (impl.OwningType == defType) { factory.MetadataManager.NoteOverridingMethod(decl, impl); } } Debug.Assert( _type == defType || ((System.Collections.IStructuralEquatable)defType.RuntimeInterfaces).Equals(_type.RuntimeInterfaces, EqualityComparer<DefType>.Default)); // Add conditional dependencies for interface methods the type implements. For example, if the type T implements // interface IFoo which has a method M1, add a dependency on T.M1 dependent on IFoo.M1 being called, since it's // possible for any IFoo object to actually be an instance of T. DefType[] defTypeRuntimeInterfaces = defType.RuntimeInterfaces; for (int interfaceIndex = 0; interfaceIndex < defTypeRuntimeInterfaces.Length; interfaceIndex++) { DefType interfaceType = defTypeRuntimeInterfaces[interfaceIndex]; Debug.Assert(interfaceType.IsInterface); bool isVariantInterfaceImpl = VariantInterfaceMethodUseNode.IsVariantInterfaceImplementation(factory, _type, interfaceType); foreach (MethodDesc interfaceMethod in interfaceType.GetAllVirtualMethods()) { // Generic virtual methods are tracked by an orthogonal mechanism. if (interfaceMethod.HasInstantiation) continue; // Static virtual methods are resolved at compile time if (interfaceMethod.Signature.IsStatic) continue; MethodDesc implMethod = defType.ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod); if (implMethod != null) { result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(implMethod), factory.VirtualMethodUse(interfaceMethod), "Interface method")); // If any of the implemented interfaces have variance, calls against compatible interface methods // could result in interface methods of this type being used (e.g. IEnumerable<object>.GetEnumerator() // can dispatch to an implementation of IEnumerable<string>.GetEnumerator()). if (isVariantInterfaceImpl) { MethodDesc typicalInterfaceMethod = interfaceMethod.GetTypicalMethodDefinition(); result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(implMethod), factory.VariantInterfaceMethodUse(typicalInterfaceMethod), "Interface method")); result.Add(new CombinedDependencyListEntry(factory.VirtualMethodUse(interfaceMethod), factory.VariantInterfaceMethodUse(typicalInterfaceMethod), "Interface method")); } factory.MetadataManager.NoteOverridingMethod(interfaceMethod, implMethod); } else { // Is the implementation provided by a default interface method? // If so, add a dependency on the entrypoint directly since nobody else is going to do that // (interface types have an empty vtable, modulo their generic dictionary). 
TypeDesc interfaceOnDefinition = defType.GetTypeDefinition().RuntimeInterfaces[interfaceIndex]; MethodDesc interfaceMethodDefinition = interfaceMethod; if (!interfaceType.IsTypeDefinition) interfaceMethodDefinition = factory.TypeSystemContext.GetMethodForInstantiatedType(interfaceMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceOnDefinition); var resolution = defType.GetTypeDefinition().ResolveInterfaceMethodToDefaultImplementationOnType(interfaceMethodDefinition, out implMethod); if (resolution == DefaultInterfaceMethodResolution.DefaultImplementation) { DefType providingInterfaceDefinitionType = (DefType)implMethod.OwningType; implMethod = implMethod.InstantiateSignature(defType.Instantiation, Instantiation.Empty); MethodDesc defaultIntfMethod = implMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (defaultIntfMethod.IsCanonicalMethod(CanonicalFormKind.Any)) { defaultIntfMethod = factory.TypeSystemContext.GetDefaultInterfaceMethodImplementationThunk(defaultIntfMethod, _type.ConvertToCanonForm(CanonicalFormKind.Specific), providingInterfaceDefinitionType); } result.Add(new CombinedDependencyListEntry(factory.MethodEntrypoint(defaultIntfMethod), factory.VirtualMethodUse(interfaceMethod), "Interface method")); factory.MetadataManager.NoteOverridingMethod(interfaceMethod, implMethod); } } } } } factory.MetadataManager.GetConditionalDependenciesDueToEETypePresence(ref result, factory, _type); return result; } public static bool IsTypeNodeShareable(TypeDesc type) { return type.IsParameterizedType || type.IsFunctionPointer || type is InstantiatedType; } internal static bool MethodHasNonGenericILMethodBody(MethodDesc method) { // Generic methods have their own generic dictionaries if (method.HasInstantiation) return false; // Abstract methods don't have a body if (method.IsAbstract) return false; // PInvoke methods are not permitted on generic types, // but let's not crash the compilation because of that. if (method.IsPInvoke) return false; // CoreRT can generate method bodies for these no matter what (worst case // they'll be throwing). We don't want to take the "return false" code path on CoreRT because // delegate methods fall into the runtime implemented category on CoreRT, but we // just treat them like regular method bodies. return true; } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { DependencyList dependencies = new DependencyList(); // Include the optional fields by default. We don't know if optional fields will be needed until // all of the interface usage has been stabilized. If we end up not needing it, the MethodTable node will not // generate any relocs to it, and the optional fields node will instruct the object writer to skip // emitting it. dependencies.Add(new DependencyListEntry(_optionalFieldsNode, "Optional fields")); // TODO-SIZE: We probably don't need to add these for all EETypes StaticsInfoHashtableNode.AddStaticsInfoDependencies(ref dependencies, factory, _type); if (EmitVirtualSlotsAndInterfaces) { if (!_type.IsArrayTypeWithoutGenericInterfaces()) { // Sealed vtables have relative pointers, so to minimize size, we build sealed vtables for the canonical types dependencies.Add(new DependencyListEntry(factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)), "Sealed Vtable")); } // Also add the un-normalized vtable slices of implemented interfaces. 
// This is important to do in the scanning phase so that the compilation phase can find // vtable information for things like IEnumerator<List<__Canon>>. foreach (TypeDesc intface in _type.RuntimeInterfaces) dependencies.Add(factory.VTable(intface), "Interface vtable slice"); // Generated type contains generic virtual methods that will get added to the GVM tables if (TypeGVMEntriesNode.TypeNeedsGVMTableEntries(_type)) { dependencies.Add(new DependencyListEntry(factory.TypeGVMEntries(_type.GetTypeDefinition()), "Type with generic virtual methods")); AddDependenciesForUniversalGVMSupport(factory, _type, ref dependencies); TypeDesc canonicalType = _type.ConvertToCanonForm(CanonicalFormKind.Specific); if (canonicalType != _type) dependencies.Add(factory.ConstructedTypeSymbol(canonicalType), "Type with generic virtual methods"); } } if (factory.CompilationModuleGroup.PresenceOfEETypeImpliesAllMethodsOnType(_type)) { if (_type.IsArray || _type.IsDefType) { // If the compilation group wants this type to be fully promoted, ensure that all non-generic methods of the // type are generated. // This may be done for several reasons: // - The MethodTable may end up COMDAT folded with other EETypes generated in a different object file. // This means their generic dictionaries need to have identical contents. The only way to achieve that is // by generating the entries for all methods that contribute to the dictionary, and sorting the dictionaries. // - The generic type may be imported into another module, in which case the generic dictionary imported // must represent all of the methods, as the set of used methods cannot be known at compile time // - As a matter of policy, the type and its methods may be exported for use in another module. The policy // may wish to specify that if a type is to be placed into a shared module, all of the methods associated with // it should also be exported. foreach (var method in _type.GetClosestDefType().ConvertToCanonForm(CanonicalFormKind.Specific).GetAllMethods()) { if (!MethodHasNonGenericILMethodBody(method)) continue; dependencies.Add(factory.MethodEntrypoint(method.GetCanonMethodTarget(CanonicalFormKind.Specific)), "Ensure all methods on type due to CompilationModuleGroup policy"); } } } if (!ConstructedEETypeNode.CreationAllowed(_type)) { // If the necessary MethodTable is the highest load level for this type, ask the metadata manager // if we have any dependencies due to reflectability. factory.MetadataManager.GetDependenciesDueToReflectability(ref dependencies, factory, _type); // If the necessary MethodTable is the highest load level, consider this a module use if (_type is MetadataType mdType && mdType.Module.GetGlobalModuleType().GetStaticConstructor() is MethodDesc moduleCctor) { dependencies.Add(factory.MethodEntrypoint(moduleCctor), "Type in a module with initializer"); } } return dependencies; } public override ObjectData GetData(NodeFactory factory, bool relocsOnly) { ObjectDataBuilder objData = new ObjectDataBuilder(factory, relocsOnly); objData.RequireInitialPointerAlignment(); objData.AddSymbol(this); ComputeOptionalEETypeFields(factory, relocsOnly); OutputGCDesc(ref objData); OutputComponentSize(ref objData); OutputFlags(factory, ref objData); objData.EmitInt(BaseSize); OutputRelatedType(factory, ref objData); // Number of vtable slots will only be known later. Reserve the bytes for it. var vtableSlotCountReservation = objData.ReserveShort(); // Number of interfaces will only be known later. Reserve the bytes for it.
var interfaceCountReservation = objData.ReserveShort(); objData.EmitInt(_type.GetHashCode()); if (EmitVirtualSlotsAndInterfaces) { // Emit VTable Debug.Assert(objData.CountBytes - ((ISymbolDefinitionNode)this).Offset == GetVTableOffset(objData.TargetPointerSize)); SlotCounter virtualSlotCounter = SlotCounter.BeginCounting(ref /* readonly */ objData); OutputVirtualSlots(factory, ref objData, _type, _type, _type, relocsOnly); // Update slot count int numberOfVtableSlots = virtualSlotCounter.CountSlots(ref /* readonly */ objData); objData.EmitShort(vtableSlotCountReservation, checked((short)numberOfVtableSlots)); // Emit interface map SlotCounter interfaceSlotCounter = SlotCounter.BeginCounting(ref /* readonly */ objData); OutputInterfaceMap(factory, ref objData); // Update slot count int numberOfInterfaceSlots = interfaceSlotCounter.CountSlots(ref /* readonly */ objData); objData.EmitShort(interfaceCountReservation, checked((short)numberOfInterfaceSlots)); } else { // If we're not emitting any slots, the number of slots is zero. objData.EmitShort(vtableSlotCountReservation, 0); objData.EmitShort(interfaceCountReservation, 0); } OutputTypeManagerIndirection(factory, ref objData); OutputWritableData(factory, ref objData); OutputFinalizerMethod(factory, ref objData); OutputOptionalFields(factory, ref objData); OutputSealedVTable(factory, relocsOnly, ref objData); OutputGenericInstantiationDetails(factory, ref objData); return objData.ToObjectData(); } /// <summary> /// Returns the offset within an MethodTable of the beginning of VTable entries /// </summary> /// <param name="pointerSize">The size of a pointer in bytes in the target architecture</param> public static int GetVTableOffset(int pointerSize) { return 16 + pointerSize; } protected virtual int GCDescSize => 0; protected virtual void OutputGCDesc(ref ObjectDataBuilder builder) { // Non-constructed EETypeNodes get no GC Desc Debug.Assert(GCDescSize == 0); } private void OutputComponentSize(ref ObjectDataBuilder objData) { if (_type.IsArray) { TypeDesc elementType = ((ArrayType)_type).ElementType; if (elementType == elementType.Context.UniversalCanonType) { objData.EmitShort(0); } else { int elementSize = elementType.GetElementSize().AsInt; // We validated that this will fit the short when the node was constructed. No need for nice messages. objData.EmitShort((short)checked((ushort)elementSize)); } } else if (_type.IsString) { objData.EmitShort(StringComponentSize.Value); } else { objData.EmitShort(0); } } private void OutputFlags(NodeFactory factory, ref ObjectDataBuilder objData) { UInt16 flags = EETypeBuilderHelpers.ComputeFlags(_type); if (_type.GetTypeDefinition() == factory.ArrayOfTEnumeratorType) { // Generic array enumerators use special variance rules recognized by the runtime flags |= (UInt16)EETypeFlags.GenericVarianceFlag; } if (factory.TypeSystemContext.IsGenericArrayInterfaceType(_type)) { // Runtime casting logic relies on all interface types implemented on arrays // to have the variant flag set (even if all the arguments are non-variant). // This supports e.g. 
casting uint[] to ICollection<int> flags |= (UInt16)EETypeFlags.GenericVarianceFlag; } if (_type.IsIDynamicInterfaceCastable) { flags |= (UInt16)EETypeFlags.IDynamicInterfaceCastableFlag; } ISymbolNode relatedTypeNode = GetRelatedTypeNode(factory); // If the related type (base type / array element type / pointee type) is not part of this compilation group, and // the output binaries will be multi-file (not multiple object files linked together), indicate to the runtime // that it should indirect through the import address table if (relatedTypeNode != null && relatedTypeNode.RepresentsIndirectionCell) { flags |= (UInt16)EETypeFlags.RelatedTypeViaIATFlag; } if (HasOptionalFields) { flags |= (UInt16)EETypeFlags.OptionalFieldsFlag; } if (this is ClonedConstructedEETypeNode) { flags |= (UInt16)EETypeKind.ClonedEEType; } objData.EmitShort((short)flags); } protected virtual int BaseSize { get { int pointerSize = _type.Context.Target.PointerSize; int objectSize; if (_type.IsDefType) { LayoutInt instanceByteCount = ((DefType)_type).InstanceByteCount; if (instanceByteCount.IsIndeterminate) { // Some value must be put in, but the specific value doesn't matter as it // isn't used for specific instantiations, and the universal canon MethodTable // is never associated with an allocated object. objectSize = pointerSize; } else { objectSize = pointerSize + ((DefType)_type).InstanceByteCount.AsInt; // +pointerSize for SyncBlock } if (_type.IsValueType) objectSize += pointerSize; // + EETypePtr field inherited from System.Object } else if (_type.IsArray) { objectSize = 3 * pointerSize; // SyncBlock + EETypePtr + Length if (_type.IsMdArray) objectSize += 2 * sizeof(int) * ((ArrayType)_type).Rank; } else if (_type.IsPointer) { // These never get boxed and don't have a base size. Use a sentinel value recognized by the runtime. return ParameterizedTypeShapeConstants.Pointer; } else if (_type.IsByRef) { // These never get boxed and don't have a base size. Use a sentinel value recognized by the runtime. return ParameterizedTypeShapeConstants.ByRef; } else throw new NotImplementedException(); objectSize = AlignmentHelper.AlignUp(objectSize, pointerSize); objectSize = Math.Max(MinimumObjectSize, objectSize); if (_type.IsString) { // If this is a string, throw away objectSize we computed so far. Strings are special. // SyncBlock + EETypePtr + length + firstChar objectSize = 2 * pointerSize + sizeof(int) + StringComponentSize.Value; } return objectSize; } } protected virtual ISymbolNode GetBaseTypeNode(NodeFactory factory) { return _type.BaseType != null ? 
factory.NecessaryTypeSymbol(_type.BaseType) : null; } private ISymbolNode GetRelatedTypeNode(NodeFactory factory) { ISymbolNode relatedTypeNode = null; if (_type.IsArray || _type.IsPointer || _type.IsByRef) { var parameterType = ((ParameterizedType)_type).ParameterType; relatedTypeNode = factory.NecessaryTypeSymbol(parameterType); } else { TypeDesc baseType = _type.BaseType; if (baseType != null) { relatedTypeNode = GetBaseTypeNode(factory); } } return relatedTypeNode; } protected virtual void OutputRelatedType(NodeFactory factory, ref ObjectDataBuilder objData) { ISymbolNode relatedTypeNode = GetRelatedTypeNode(factory); if (relatedTypeNode != null) { objData.EmitPointerReloc(relatedTypeNode); } else { objData.EmitZeroPointer(); } } private void OutputVirtualSlots(NodeFactory factory, ref ObjectDataBuilder objData, TypeDesc implType, TypeDesc declType, TypeDesc templateType, bool relocsOnly) { Debug.Assert(EmitVirtualSlotsAndInterfaces); declType = declType.GetClosestDefType(); templateType = templateType.ConvertToCanonForm(CanonicalFormKind.Specific); var baseType = declType.BaseType; if (baseType != null) { Debug.Assert(templateType.BaseType != null); OutputVirtualSlots(factory, ref objData, implType, baseType, templateType.BaseType, relocsOnly); } // // In the universal canonical types case, we could have base types in the hierarchy that are partial universal canonical types. // The presence of these types could cause incorrect vtable layouts, so we need to fully canonicalize them and walk the // hierarchy of the template type of the original input type to detect these cases. // // Example: we begin with Derived<__UniversalCanon> and walk the template hierarchy: // // class Derived<T> : Middle<T, MyStruct> { } // -> Template is Derived<__UniversalCanon> and needs a dictionary slot // // -> Basetype template is Middle<__UniversalCanon, MyStruct>. It's a partial // Universal canonical type, so we need to fully canonicalize it. // // class Middle<T, U> : Base<U> { } // -> Template is Middle<__UniversalCanon, __UniversalCanon> and needs a dictionary slot // // -> Basetype template is Base<__UniversalCanon> // // class Base<T> { } // -> Template is Base<__UniversalCanon> and needs a dictionary slot. // // If we had not fully canonicalized the Middle class template, we would have ended up with Base<MyStruct>, which does not need // a dictionary slot, meaning we would have created a vtable layout that the runtime does not expect. // // The generic dictionary pointer occupies the first slot of each type vtable slice if (declType.HasGenericDictionarySlot() || templateType.HasGenericDictionarySlot()) { // All generic interface types have a dictionary slot, but only some of them have an actual dictionary.
bool isInterfaceWithAnEmptySlot = declType.IsInterface && declType.ConvertToCanonForm(CanonicalFormKind.Specific) == declType; // Note: Canonical type instantiations always have a generic dictionary vtable slot, but it's empty // Note: If the current EETypeNode represents a universal canonical type, any dictionary slot must be empty if (declType.IsCanonicalSubtype(CanonicalFormKind.Any) || implType.IsCanonicalSubtype(CanonicalFormKind.Universal) || factory.LazyGenericsPolicy.UsesLazyGenerics(declType) || isInterfaceWithAnEmptySlot) objData.EmitZeroPointer(); else objData.EmitPointerReloc(factory.TypeGenericDictionary(declType)); } VTableSliceNode declVTable = factory.VTable(declType); // It's only okay to touch the actual list of slots if we're in the final emission phase // or the vtable is not built lazily. if (relocsOnly && !declVTable.HasFixedSlots) return; // Interface types don't place anything else in their physical vtable. // Interfaces have logical slots for their methods but since they're all abstract, they would be zero. // We place default implementations of interface methods into the vtable of the interface-implementing // type, pretending there was an extra virtual slot. if (_type.IsInterface) return; // Actual vtable slots follow IReadOnlyList<MethodDesc> virtualSlots = declVTable.Slots; for (int i = 0; i < virtualSlots.Count; i++) { MethodDesc declMethod = virtualSlots[i]; // Object.Finalize shouldn't get a virtual slot. Finalizer is stored in an optional field // instead: most MethodTables don't have a finalizer, but all EETypes contain Object's vtable. // This lets us save a pointer (+reloc) on most EETypes. Debug.Assert(!declType.IsObject || declMethod.Name != "Finalize"); // No generic virtual methods can appear in the vtable! Debug.Assert(!declMethod.HasInstantiation); MethodDesc implMethod = implType.GetClosestDefType().FindVirtualFunctionTargetMethodOnObjectType(declMethod); // Final NewSlot methods cannot be overridden, and therefore can be placed in the sealed-vtable to reduce the size of the vtable // of this type and any type that inherits from it. if (declMethod.CanMethodBeInSealedVTable() && !declType.IsArrayTypeWithoutGenericInterfaces()) continue; if (!implMethod.IsAbstract) { MethodDesc canonImplMethod = implMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // If the type we're generating now is abstract, and the implementation comes from an abstract type, // only use a tentative method entrypoint that can have its body replaced by a throwing stub // if no "hard" reference to that entrypoint exists in the program. // This helps us to eliminate method bodies for virtual methods on abstract types that are fully overridden // in the children of that abstract type. bool canUseTentativeEntrypoint = implType is MetadataType mdImplType && mdImplType.IsAbstract && !mdImplType.IsInterface && implMethod.OwningType is MetadataType mdImplMethodType && mdImplMethodType.IsAbstract && factory.CompilationModuleGroup.AllowVirtualMethodOnAbstractTypeOptimization(canonImplMethod); IMethodNode implSymbol = canUseTentativeEntrypoint ?
factory.TentativeMethodEntrypoint(canonImplMethod, implMethod.OwningType.IsValueType) : factory.MethodEntrypoint(canonImplMethod, implMethod.OwningType.IsValueType); objData.EmitPointerReloc(implSymbol); } else { objData.EmitZeroPointer(); } } } protected virtual IEETypeNode GetInterfaceTypeNode(NodeFactory factory, TypeDesc interfaceType) { return factory.NecessaryTypeSymbol(interfaceType); } protected virtual void OutputInterfaceMap(NodeFactory factory, ref ObjectDataBuilder objData) { Debug.Assert(EmitVirtualSlotsAndInterfaces); foreach (var itf in _type.RuntimeInterfaces) { objData.EmitPointerRelocOrIndirectionReference(GetInterfaceTypeNode(factory, itf)); } } private void OutputFinalizerMethod(NodeFactory factory, ref ObjectDataBuilder objData) { if (_type.HasFinalizer) { MethodDesc finalizerMethod = _type.GetFinalizer(); MethodDesc canonFinalizerMethod = finalizerMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (factory.Target.SupportsRelativePointers) objData.EmitReloc(factory.MethodEntrypoint(canonFinalizerMethod), RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(factory.MethodEntrypoint(canonFinalizerMethod)); } } protected void OutputTypeManagerIndirection(NodeFactory factory, ref ObjectDataBuilder objData) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(factory.TypeManagerIndirection, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(factory.TypeManagerIndirection); } protected void OutputWritableData(NodeFactory factory, ref ObjectDataBuilder objData) { if (factory.Target.SupportsRelativePointers) { Utf8StringBuilder writableDataBlobName = new Utf8StringBuilder(); writableDataBlobName.Append("__writableData"); writableDataBlobName.Append(factory.NameMangler.GetMangledTypeName(_type)); BlobNode blob = factory.UninitializedWritableDataBlob(writableDataBlobName.ToUtf8String(), WritableData.GetSize(factory.Target.PointerSize), WritableData.GetAlignment(factory.Target.PointerSize)); objData.EmitReloc(blob, RelocType.IMAGE_REL_BASED_RELPTR32); } } protected void OutputOptionalFields(NodeFactory factory, ref ObjectDataBuilder objData) { if (HasOptionalFields) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(_optionalFieldsNode, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(_optionalFieldsNode); } } private void OutputSealedVTable(NodeFactory factory, bool relocsOnly, ref ObjectDataBuilder objData) { if (EmitVirtualSlotsAndInterfaces && !_type.IsArrayTypeWithoutGenericInterfaces()) { // Sealed vtables have relative pointers, so to minimize size, we build sealed vtables for the canonical types SealedVTableNode sealedVTable = factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)); if (sealedVTable.BuildSealedVTableSlots(factory, relocsOnly) && sealedVTable.NumSealedVTableEntries > 0) { if (factory.Target.SupportsRelativePointers) objData.EmitReloc(sealedVTable, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(sealedVTable); } } } private void OutputGenericInstantiationDetails(NodeFactory factory, ref ObjectDataBuilder objData) { if (_type.HasInstantiation && !_type.IsTypeDefinition) { IEETypeNode typeDefNode = factory.NecessaryTypeSymbol(_type.GetTypeDefinition()); if (factory.Target.SupportsRelativePointers) objData.EmitRelativeRelocOrIndirectionReference(typeDefNode); else objData.EmitPointerRelocOrIndirectionReference(typeDefNode); GenericCompositionDetails details; if (_type.GetTypeDefinition() == factory.ArrayOfTEnumeratorType) { // Generic array 
enumerators use special variance rules recognized by the runtime details = new GenericCompositionDetails(_type.Instantiation, new[] { GenericVariance.ArrayCovariant }); } else if (factory.TypeSystemContext.IsGenericArrayInterfaceType(_type)) { // Runtime casting logic relies on all interface types implemented on arrays // to have the variant flag set (even if all the arguments are non-variant). // This supports e.g. casting uint[] to ICollection<int> details = new GenericCompositionDetails(_type, forceVarianceInfo: true); } else details = new GenericCompositionDetails(_type); ISymbolNode compositionNode = factory.GenericComposition(details); if (factory.Target.SupportsRelativePointers) objData.EmitReloc(compositionNode, RelocType.IMAGE_REL_BASED_RELPTR32); else objData.EmitPointerReloc(compositionNode); } } /// <summary> /// Populate the OptionalFieldsRuntimeBuilder if any optional fields are required. /// </summary> protected internal virtual void ComputeOptionalEETypeFields(NodeFactory factory, bool relocsOnly) { if (!relocsOnly && MightHaveInterfaceDispatchMap(factory)) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.DispatchMap, checked((uint)factory.InterfaceDispatchMapIndirection(Type).IndexFromBeginningOfArray)); } ComputeRareFlags(factory, relocsOnly); ComputeNullableValueOffset(); ComputeValueTypeFieldPadding(); } void ComputeRareFlags(NodeFactory factory, bool relocsOnly) { uint flags = 0; MetadataType metadataType = _type as MetadataType; if (factory.PreinitializationManager.HasLazyStaticConstructor(_type)) { flags |= (uint)EETypeRareFlags.HasCctorFlag; } if (_type.RequiresAlign8()) { flags |= (uint)EETypeRareFlags.RequiresAlign8Flag; } TargetArchitecture targetArch = _type.Context.Target.Architecture; if (metadataType != null && (targetArch == TargetArchitecture.ARM || targetArch == TargetArchitecture.ARM64) && metadataType.IsHomogeneousAggregate) { flags |= (uint)EETypeRareFlags.IsHFAFlag; } if (metadataType != null && !_type.IsInterface && metadataType.IsAbstract) { flags |= (uint)EETypeRareFlags.IsAbstractClassFlag; } if (_type.IsByRefLike) { flags |= (uint)EETypeRareFlags.IsByRefLikeFlag; } if (EmitVirtualSlotsAndInterfaces && !_type.IsArrayTypeWithoutGenericInterfaces()) { SealedVTableNode sealedVTable = factory.SealedVTable(_type.ConvertToCanonForm(CanonicalFormKind.Specific)); if (sealedVTable.BuildSealedVTableSlots(factory, relocsOnly) && sealedVTable.NumSealedVTableEntries > 0) flags |= (uint)EETypeRareFlags.HasSealedVTableEntriesFlag; } if (flags != 0) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.RareFlags, flags); } } /// <summary> /// To support boxing / unboxing, the offset of the value field of a Nullable type is recorded on the MethodTable. /// This is variable according to the alignment requirements of the Nullable&lt;T&gt; type parameter. /// </summary> void ComputeNullableValueOffset() { if (!_type.IsNullable) return; if (!_type.Instantiation[0].IsCanonicalSubtype(CanonicalFormKind.Universal)) { var field = _type.GetKnownField("value"); // In the definition of Nullable<T>, the first field should be the boolean representing "hasValue" Debug.Assert(field.Offset.AsInt > 0); // The contract with the runtime states the Nullable value offset is stored with the boolean "hasValue" size subtracted // to get a small encoding size win. 
_optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.NullableValueOffset, (uint)field.Offset.AsInt - 1); } } protected virtual void ComputeValueTypeFieldPadding() { // All types that can be derived from, and whose instance size is therefore observable to derived types, compute ValueTypeFieldPadding. // Unfortunately, the name ValueTypeFieldPadding is now a misnomer; it is kept to avoid integration conflicts. // Interfaces, sealed types, and non-DefTypes cannot be derived from if (_type.IsInterface || !_type.IsDefType || (_type.IsSealed() && !_type.IsValueType)) return; DefType defType = _type as DefType; Debug.Assert(defType != null); uint valueTypeFieldPaddingEncoded; if (defType.InstanceByteCount.IsIndeterminate) { valueTypeFieldPaddingEncoded = EETypeBuilderHelpers.ComputeValueTypeFieldPaddingFieldValue(0, 1, _type.Context.Target.PointerSize); } else { int numInstanceFieldBytes = defType.InstanceByteCountUnaligned.AsInt; // Check if we have a type derived from System.ValueType or System.Enum, but not System.Enum itself if (defType.IsValueType) { // Value types should have at least 1 byte of size Debug.Assert(numInstanceFieldBytes >= 1); // The size doesn't currently include the MethodTable pointer size. We need to add this so that // the number of instance field bytes consistently represents the boxed size. numInstanceFieldBytes += _type.Context.Target.PointerSize; } // For unboxing to work correctly and for supporting dynamic type loading for derived types we need // to record the actual size of the fields of a type without any padding for GC heap allocation (since // we can unbox into locals or arrays where this padding is not used, and because field layout for derived // types is affected by the unaligned base size). We don't want to store this information for all EETypes // since it's only relevant for value types and derivable types, so it's added as an optional field. It's // also enough to simply store the size of the padding (between 0 and 4 or 8 bytes for 32-bit and 0 and 8 or 16 bytes // for 64-bit) which cuts down our storage requirements. uint valueTypeFieldPadding = checked((uint)((BaseSize - _type.Context.Target.PointerSize) - numInstanceFieldBytes)); valueTypeFieldPaddingEncoded = EETypeBuilderHelpers.ComputeValueTypeFieldPaddingFieldValue(valueTypeFieldPadding, (uint)defType.InstanceFieldAlignment.AsInt, _type.Context.Target.PointerSize); } if (valueTypeFieldPaddingEncoded != 0) { _optionalFieldsBuilder.SetFieldValue(EETypeOptionalFieldTag.ValueTypeFieldPadding, valueTypeFieldPaddingEncoded); } } protected override void OnMarked(NodeFactory context) { if (!context.IsCppCodegenTemporaryWorkaround) { Debug.Assert(_type.IsTypeDefinition || !_type.HasSameTypeDefinition(context.ArrayOfTClass), "Asking for Array<T> MethodTable"); } } public static void AddDependenciesForStaticsNode(NodeFactory factory, TypeDesc type, ref DependencyList dependencies) { // To ensure that the behavior of FieldInfo.GetValue/SetValue remains correct, // if a type may be reflectable, and it is generic, and a canonical instantiation used for reflection // can exist which can refer to the associated type of this static base, ensure that type // has a MethodTable (which will allow the static field lookup logic to find the right type). if (type.HasInstantiation && !factory.MetadataManager.IsReflectionBlocked(type)) { // TODO-SIZE: This current implementation is slightly generous, as it does not attempt to restrict // the created types to the maximum extent by investigating reflection data and such.
Here we just // check if we support use of a canonically equivalent type to perform reflection. // We don't check to see if reflection is enabled on the type. if (factory.TypeSystemContext.SupportsUniversalCanon || (factory.TypeSystemContext.SupportsCanon && (type != type.ConvertToCanonForm(CanonicalFormKind.Specific)))) { if (dependencies == null) dependencies = new DependencyList(); dependencies.Add(factory.NecessaryTypeSymbol(type), "Static block owning type is necessary for canonically equivalent reflection"); } } } protected static void AddDependenciesForUniversalGVMSupport(NodeFactory factory, TypeDesc type, ref DependencyList dependencies) { if (factory.TypeSystemContext.SupportsUniversalCanon) { foreach (MethodDesc method in type.GetVirtualMethods()) { if (!method.HasInstantiation) continue; if (method.IsAbstract) continue; TypeDesc[] universalCanonArray = new TypeDesc[method.Instantiation.Length]; for (int i = 0; i < universalCanonArray.Length; i++) universalCanonArray[i] = factory.TypeSystemContext.UniversalCanonType; MethodDesc universalCanonMethodNonCanonicalized = method.MakeInstantiatedMethod(new Instantiation(universalCanonArray)); MethodDesc universalCanonGVMMethod = universalCanonMethodNonCanonicalized.GetCanonMethodTarget(CanonicalFormKind.Universal); if (dependencies == null) dependencies = new DependencyList(); dependencies.Add(new DependencyListEntry(factory.MethodEntrypoint(universalCanonGVMMethod), "USG GVM Method")); } } } public override int ClassCode => 1521789141; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { return comparer.Compare(_type, ((EETypeNode)other)._type); } public override string ToString() { return _type.ToString(); } private struct SlotCounter { private int _startBytes; public static SlotCounter BeginCounting(ref /* readonly */ ObjectDataBuilder builder) => new SlotCounter { _startBytes = builder.CountBytes }; public int CountSlots(ref /* readonly */ ObjectDataBuilder builder) { int bytesEmitted = builder.CountBytes - _startBytes; Debug.Assert(bytesEmitted % builder.TargetPointerSize == 0); return bytesEmitted / builder.TargetPointerSize; } } } }
1
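The ComputeNullableValueOffset logic in the record above stores the offset of Nullable&lt;T&gt;'s value field minus the size of the leading one-byte "hasValue" boolean. Below is a minimal sketch of that encoding, assuming a typical Nullable&lt;int&gt; layout; the helper names are hypothetical and not part of the compiler:

using System.Diagnostics;

static class NullableOffsetSketch
{
    // Encodes the value the compiler writes into EETypeOptionalFieldTag.NullableValueOffset:
    // the runtime contract subtracts the 1-byte "hasValue" size for a smaller encoding.
    public static uint Encode(int valueFieldOffset)
    {
        Debug.Assert(valueFieldOffset > 0); // "hasValue" occupies offset 0
        return (uint)valueFieldOffset - 1;
    }

    // The runtime adds the byte back when it needs the real field offset.
    public static int Decode(uint encoded) => (int)encoded + 1;

    public static void Main()
    {
        // For Nullable<int>, "value" typically sits at offset 4 because of
        // int alignment, so the optional field stores 3.
        uint stored = Encode(4);
        Debug.Assert(stored == 3 && Decode(stored) == 4);
    }
}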
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
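As context for the record below, here is a small, illustrative C# shape of the feature this PR enables; the type and method names are hypothetical, not taken from the PR:

// A static virtual (static abstract) interface member and a constrained call to it.
interface ICreatable<TSelf> where TSelf : ICreatable<TSelf>
{
    static abstract TSelf Create();
}

struct Widget : ICreatable<Widget>
{
    public static Widget Create() => new Widget();
}

static class Factory
{
    // T.Create() compiles to a "constrained. !!T"-prefixed call to
    // ICreatable<!!T>.Create(). In shared (canonical) generic code the exact
    // target cannot be burned into the method body, so the compiler can
    // resolve it through a generic dictionary slot such as the
    // ConstrainedMethodUseLookupResult defined in the file below.
    public static T Make<T>() where T : ICreatable<T> => T.Create();
}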
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.IL; using Internal.Text; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; namespace ILCompiler.DependencyAnalysis { public enum GenericLookupResultReferenceType { Direct, // The slot stores a direct pointer to the target Indirect, // The slot is an indirection cell which points to the direct pointer ConditionalIndirect, // The slot may be a direct pointer or an indirection cell, depending on the last digit } // Represents a generic lookup within a canonical method body. // TODO: unify with NativeFormat.FixupSignatureKind public enum LookupResultType { Invalid, MethodTable, // a type UnwrapNullable, // a type (The type T described by a type spec that is generic over Nullable<T>) NonGcStatic, // the non-gc statics of a type GcStatic, // the gc statics of a type Method, // a method InterfaceDispatchCell, // the dispatch cell for calling an interface method MethodDictionary, // a dictionary for calling a generic method UnboxingStub, // the unboxing stub for a method ArrayType, // an array of type DefaultCtor, // default ctor of a type AllocObject, // the allocator of a type GvmVtableOffset, // vtable offset of a generic virtual method ProfileCounter, // profiling counter cell MethodLdToken, // a ldtoken result for a method FieldLdToken, // a ldtoken result for a field Field, // a field descriptor IsInst, // isinst helper CastClass, // castclass helper AllocArray, // the array allocator of a type TypeSize, // size of the type FieldOffset, // field offset CallingConvention_NoInstParam, // CallingConventionConverterThunk NO_INSTANTIATING_PARAM CallingConvention_HasInstParam, // CallingConventionConverterThunk HAS_INSTANTIATING_PARAM CallingConvention_MaybeInstParam, // CallingConventionConverterThunk MAYBE_INSTANTIATING_PARAM VtableOffset, // Offset of a virtual method into the type's vtable Constrained, // ConstrainedCallDesc ConstrainedDirect, // Direct ConstrainedCallDesc Integer, // Integer UnboxingMethod, // UnboxingMethod } public interface IGenericLookupResultTocWriter { void WriteData(GenericLookupResultReferenceType referenceType, LookupResultType slotType, TypeSystemEntity context); void WriteIntegerSlot(int value); } public struct GenericLookupResultContext { private readonly TypeSystemEntity _canonicalOwner; public readonly Instantiation TypeInstantiation; public readonly Instantiation MethodInstantiation; public TypeSystemEntity Context { get { if (_canonicalOwner is TypeDesc) { var owningTypeDefinition = (MetadataType)((TypeDesc)_canonicalOwner).GetTypeDefinition(); Debug.Assert(owningTypeDefinition.Instantiation.Length == TypeInstantiation.Length); Debug.Assert(MethodInstantiation.IsNull || MethodInstantiation.Length == 0); return owningTypeDefinition.MakeInstantiatedType(TypeInstantiation); } Debug.Assert(_canonicalOwner is MethodDesc); MethodDesc owningMethodDefinition = ((MethodDesc)_canonicalOwner).GetTypicalMethodDefinition(); Debug.Assert(owningMethodDefinition.Instantiation.Length == MethodInstantiation.Length); MethodDesc concreteMethod = owningMethodDefinition; if (!TypeInstantiation.IsNull && TypeInstantiation.Length > 0) { TypeDesc owningType = owningMethodDefinition.OwningType; Debug.Assert(owningType.Instantiation.Length == TypeInstantiation.Length); concreteMethod = 
owningType.Context.GetMethodForInstantiatedType(owningMethodDefinition, ((MetadataType)owningType).MakeInstantiatedType(TypeInstantiation)); } else { Debug.Assert(owningMethodDefinition.OwningType.Instantiation.IsNull || owningMethodDefinition.OwningType.Instantiation.Length == 0); } return concreteMethod.MakeInstantiatedMethod(MethodInstantiation); } } public GenericLookupResultContext(TypeSystemEntity canonicalOwner, Instantiation typeInst, Instantiation methodInst) { _canonicalOwner = canonicalOwner; TypeInstantiation = typeInst; MethodInstantiation = methodInst; } } /// <summary> /// Represents the result of a generic lookup within a canonical method body. /// The concrete artifact the generic lookup will result in can only be determined after substituting /// runtime determined types with a concrete generic context. Use /// <see cref="GetTarget(NodeFactory, Instantiation, Instantiation, GenericDictionaryNode)"/> to obtain the concrete /// node the result points to. /// </summary> public abstract class GenericLookupResult { protected abstract int ClassCode { get; } public abstract ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary); public abstract void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb); public abstract override string ToString(); protected abstract int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer); protected abstract bool EqualsImpl(GenericLookupResult obj); protected abstract int GetHashCodeImpl(); public sealed override bool Equals(object obj) { GenericLookupResult other = obj as GenericLookupResult; if (other == null) return false; return ClassCode == other.ClassCode && EqualsImpl(other); } public sealed override int GetHashCode() { return ClassCode * 31 + GetHashCodeImpl(); } public virtual void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { ISymbolNode target; try { target = GetTarget(factory, dictionary); } catch (TypeSystemException) { target = null; } if (target == null) { builder.EmitZeroPointer(); } else if (LookupResultReferenceType(factory) == GenericLookupResultReferenceType.ConditionalIndirect) { builder.EmitPointerRelocOrIndirectionReference(target); } else { builder.EmitPointerReloc(target); } } public virtual GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { return GenericLookupResultReferenceType.Direct; } public abstract NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory); public abstract void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer); // Call this API to get non-reloc dependencies that arise from use of a dictionary lookup public virtual IEnumerable<DependencyNodeCore<NodeFactory>> NonRelocDependenciesFromUsage(NodeFactory factory) { return Array.Empty<DependencyNodeCore<NodeFactory>>(); } public class Comparer : IComparer<GenericLookupResult> { private TypeSystemComparer _comparer; public Comparer(TypeSystemComparer comparer) { _comparer = comparer; } public int Compare(GenericLookupResult x, GenericLookupResult y) { if (x == y) { return 0; } int codeX = x.ClassCode; int codeY = y.ClassCode; if (codeX == codeY) { Debug.Assert(x.GetType() == y.GetType()); int result = x.CompareToImpl(y, _comparer); // We did a reference equality check above so an "Equal" result is not expected Debug.Assert(result != 0); return result; } else { Debug.Assert(x.GetType() != y.GetType()); return codeX > codeY ? 
-1 : 1; } } } } /// <summary> /// Generic lookup result that points to a MethodTable. /// </summary> public sealed class TypeHandleGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1623839081; public TypeHandleGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { // We are getting a maximally constructable type symbol because this might be something passed to newobj. TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); factory.TypeSystemContext.DetectGenericCycles(dictionary.Context, instantiatedType); return factory.MaximallyConstructableType(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("TypeHandle_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public TypeDesc Type => _type; public override string ToString() => $"TypeHandle: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.TypeHandleDictionarySlot(_type); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodTable, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeHandleGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeHandleGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to a MethodTable; if the type is Nullable&lt;X&gt;, the MethodTable is that of X /// </summary> public sealed class UnwrapNullableTypeHandleGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 53521918; public UnwrapNullableTypeHandleGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); // Unwrap the nullable type if necessary if (instantiatedType.IsNullable) instantiatedType = instantiatedType.Instantiation[0]; // We are getting a constructed type symbol because this might be something passed to newobj. 
return factory.ConstructedTypeSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("UnwrapNullable_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public TypeDesc Type => _type; public override string ToString() => $"UnwrapNullable: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.UnwrapNullableTypeDictionarySlot(_type); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.UnwrapNullable, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((UnwrapNullableTypeHandleGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((UnwrapNullableTypeHandleGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that puts a field offset into the generic dictionary. /// </summary> internal sealed class FieldOffsetGenericLookupResult : GenericLookupResult { private FieldDesc _field; protected override int ClassCode => -1670293557; public FieldOffsetGenericLookupResult(FieldDesc field) { Debug.Assert(field.OwningType.IsRuntimeDeterminedSubtype, "Concrete field in a generic dictionary?"); _field = field; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a FieldOffsetGenericLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { FieldDesc instantiatedField = _field.GetNonRuntimeDeterminedFieldFromRuntimeDeterminedFieldViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); int offset = instantiatedField.Offset.AsInt; builder.EmitNaturalInt(offset); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("FieldOffset_"); sb.Append(nameMangler.GetMangledFieldName(_field)); } public override string ToString() => $"FieldOffset: {_field}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.FieldOffsetDictionarySlot(_field); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.FieldOffset, _field); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_field, ((FieldOffsetGenericLookupResult)other)._field); } protected override int GetHashCodeImpl() { return _field.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((FieldOffsetGenericLookupResult)obj)._field == _field; } } /// <summary> /// Generic lookup result that puts a vtable offset into the generic dictionary. 
/// </summary> internal sealed class VTableOffsetGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 386794182; public VTableOffsetGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a VTableOffsetGenericLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { Debug.Fail("VTableOffset contents should only be generated into generic dictionaries at runtime"); builder.EmitNaturalInt(0); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("VTableOffset_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"VTableOffset: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.VTableOffsetDictionarySlot(_method); } public override IEnumerable<DependencyNodeCore<NodeFactory>> NonRelocDependenciesFromUsage(NodeFactory factory) { MethodDesc canonMethod = _method.GetCanonMethodTarget(CanonicalFormKind.Universal); // If we're producing a full vtable for the type, we don't need to report virtual method use. if (factory.VTable(canonMethod.OwningType).HasFixedSlots) return Array.Empty<DependencyNodeCore<NodeFactory>>(); return new DependencyNodeCore<NodeFactory>[] { factory.VirtualMethodUse(canonMethod) }; } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.VtableOffset, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((VTableOffsetGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((VTableOffsetGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to a RuntimeMethodHandle. 
/// </summary> internal sealed class MethodHandleGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 394272689; public MethodHandleGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.RuntimeMethodHandle(instantiatedMethod); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("MethodHandle_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"MethodHandle: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.MethodLdTokenDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodLdToken, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((MethodHandleGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodHandleGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to a RuntimeFieldHandle. /// </summary> internal sealed class FieldHandleGenericLookupResult : GenericLookupResult { private FieldDesc _field; protected override int ClassCode => -196995964; public FieldHandleGenericLookupResult(FieldDesc field) { Debug.Assert(field.OwningType.IsRuntimeDeterminedSubtype, "Concrete field in a generic dictionary?"); _field = field; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { FieldDesc instantiatedField = _field.GetNonRuntimeDeterminedFieldFromRuntimeDeterminedFieldViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.RuntimeFieldHandle(instantiatedField); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("FieldHandle_"); sb.Append(nameMangler.GetMangledFieldName(_field)); } public override string ToString() => $"FieldHandle: {_field}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.FieldLdTokenDictionarySlot(_field); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.FieldLdToken, _field); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_field, ((FieldHandleGenericLookupResult)other)._field); } protected override int GetHashCodeImpl() { return _field.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((FieldHandleGenericLookupResult)obj)._field == _field; } } /// <summary> /// Generic lookup result that points to a method dictionary. 
/// </summary> public sealed class MethodDictionaryGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => -467418176; public MethodDictionaryGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); factory.TypeSystemContext.DetectGenericCycles(dictionary.Context, instantiatedMethod); return factory.MethodGenericDictionary(instantiatedMethod); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("MethodDictionary_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public MethodDesc Method => _method; public override string ToString() => $"MethodDictionary: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.MethodDictionaryDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodDictionary, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((MethodDictionaryGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodDictionaryGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that is a function pointer. 
/// </summary> internal sealed class MethodEntryGenericLookupResult : GenericLookupResult { private MethodDesc _method; private bool _isUnboxingThunk; protected override int ClassCode => 1572293098; public MethodEntryGenericLookupResult(MethodDesc method, bool isUnboxingThunk) { Debug.Assert(method.IsRuntimeDeterminedExactMethod); _method = method; _isUnboxingThunk = isUnboxingThunk; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.FatFunctionPointer(instantiatedMethod, _isUnboxingThunk); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { if (!_isUnboxingThunk) sb.Append("MethodEntry_"); else sb.Append("UnboxMethodEntry_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"MethodEntry: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { MethodDesc canonMethod = _method.GetCanonMethodTarget(CanonicalFormKind.Specific); // // For universal canonical methods, we don't need the unboxing stub really, because // the calling convention translation thunk will handle the unboxing (and we can avoid having a double thunk here) // We just need the flag in the native layout info signature indicating that we needed an unboxing stub // bool getUnboxingStubNode = _isUnboxingThunk && !canonMethod.IsCanonicalMethod(CanonicalFormKind.Universal); return factory.NativeLayout.MethodEntrypointDictionarySlot( _method, _isUnboxingThunk, factory.MethodEntrypoint(canonMethod, getUnboxingStubNode)); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { LookupResultType lookupResult = LookupResultType.Method; writer.WriteData(LookupResultReferenceType(factory), lookupResult, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherEntry = (MethodEntryGenericLookupResult)other; int result = (_isUnboxingThunk ? 1 : 0) - (otherEntry._isUnboxingThunk ? 1 : 0); if (result != 0) return result; return comparer.Compare(_method, otherEntry._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodEntryGenericLookupResult)obj)._method == _method && ((MethodEntryGenericLookupResult)obj)._isUnboxingThunk == _isUnboxingThunk; } } /// <summary> /// Generic lookup result that points to a dispatch cell. /// </summary> internal sealed class VirtualDispatchCellGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 643566930; public VirtualDispatchCellGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod); Debug.Assert(method.IsVirtual); Debug.Assert(method.OwningType.IsInterface); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext context) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(context.TypeInstantiation, context.MethodInstantiation); TypeSystemEntity contextOwner = context.Context; GenericDictionaryNode dictionary = contextOwner is TypeDesc ? 
(GenericDictionaryNode)factory.TypeGenericDictionary((TypeDesc)contextOwner) : (GenericDictionaryNode)factory.MethodGenericDictionary((MethodDesc)contextOwner); return factory.InterfaceDispatchCell(instantiatedMethod, dictionary.GetMangledName(factory.NameMangler)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("DispatchCell_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"DispatchCell: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.InterfaceCellDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.InterfaceDispatchCell, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((VirtualDispatchCellGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((VirtualDispatchCellGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to the non-GC static base of a type. /// </summary> internal sealed class TypeNonGCStaticBaseGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => -328863267; public TypeNonGCStaticBaseGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeNonGCStaticsSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("NonGCStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"NonGCStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.NonGcStaticDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.NonGcStatic, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeNonGCStaticBaseGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeNonGCStaticBaseGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to the threadstatic base index of a type. 
/// </summary> internal sealed class TypeThreadStaticBaseIndexGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => -177446371; public TypeThreadStaticBaseIndexGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeThreadStaticIndex(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("ThreadStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"ThreadStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.ThreadStaticBaseIndexDictionarySlotNode(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeThreadStaticBaseIndexGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeThreadStaticBaseIndexGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to the GC static base of a type. 
/// </summary> public sealed class TypeGCStaticBaseGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => 429225829; public TypeGCStaticBaseGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeGCStaticsSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("GCStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public MetadataType Type => _type; public override string ToString() => $"GCStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.GcStaticDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.GcStatic, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeGCStaticBaseGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeGCStaticBaseGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an object allocator. /// </summary> internal sealed class ObjectAllocatorGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -1671431655; public ObjectAllocatorGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetNewObjectHelperForType(instantiatedType)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("AllocObject_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"AllocObject: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.AllocateObjectDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.AllocObject, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((ObjectAllocatorGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((ObjectAllocatorGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an array allocator. 
/// </summary> internal sealed class ArrayAllocatorGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -927905284; public ArrayAllocatorGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); Debug.Assert(instantiatedType.IsArray); return factory.ExternSymbol(JitHelper.GetNewArrayHelperForType(instantiatedType)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("AllocArray_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"AllocArray: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.AllocateArrayDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.AllocArray, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((ArrayAllocatorGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((ArrayAllocatorGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to a cast helper. /// </summary> internal sealed class CastClassGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1691016084; public CastClassGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetCastingHelperNameForType(instantiatedType, true)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("CastClass_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"CastClass: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.CastClassDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.CastClass, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((CastClassGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((CastClassGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an isInst helper. 
/// </summary> internal sealed class IsInstGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1724059349; public IsInstGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetCastingHelperNameForType(instantiatedType, false)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("IsInst_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"IsInst: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.IsInstDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.IsInst, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((IsInstGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((IsInstGenericLookupResult)obj)._type == _type; } } internal sealed class DefaultConstructorLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -1391112482; public DefaultConstructorLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); MethodDesc defaultCtor = Compilation.GetConstructorForCreateInstanceIntrinsic(instantiatedType); return factory.CanonicalEntrypoint(defaultCtor); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("DefaultCtor_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"DefaultConstructor: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.DefaultConstructorDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.DefaultCtor, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((DefaultConstructorLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((DefaultConstructorLookupResult)obj)._type == _type; } } internal sealed class CallingConventionConverterLookupResult : GenericLookupResult { private CallingConventionConverterKey _callingConventionConverter; protected override int ClassCode => -581806472; public 
CallingConventionConverterLookupResult(CallingConventionConverterKey callingConventionConverter) { _callingConventionConverter = callingConventionConverter; Debug.Assert(Internal.Runtime.UniversalGenericParameterLayout.MethodSignatureHasVarsNeedingCallingConventionConverter(callingConventionConverter.Signature)); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a CallingConventionConverterLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { Debug.Fail("CallingConventionConverterLookupResult contents should only be generated into generic dictionaries at runtime"); builder.EmitNaturalInt(0); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("CallingConventionConverterLookupResult_"); sb.Append(_callingConventionConverter.GetName()); } public override string ToString() => "CallingConventionConverterLookupResult"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.CallingConventionConverter(_callingConventionConverter); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherEntry = (CallingConventionConverterLookupResult)other; int result = (int)(_callingConventionConverter.ConverterKind - otherEntry._callingConventionConverter.ConverterKind); if (result != 0) return result; return comparer.Compare(_callingConventionConverter.Signature, otherEntry._callingConventionConverter.Signature); } protected override int GetHashCodeImpl() { return _callingConventionConverter.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((CallingConventionConverterLookupResult)obj)._callingConventionConverter.Equals(_callingConventionConverter); } } internal sealed class TypeSizeLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -367755250; public TypeSizeLookupResult(TypeDesc type) { _type = type; Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a TypeSizeLookupResult doesn't make sense. 
It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); int typeSize; if (_type.IsDefType) { typeSize = ((DefType)_type).InstanceFieldSize.AsInt; } else { typeSize = factory.TypeSystemContext.Target.PointerSize; } builder.EmitNaturalInt(typeSize); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("TypeSize_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"TypeSize: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.TypeSizeDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.TypeSize, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeSizeLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeSizeLookupResult)obj)._type == _type; } } internal sealed class ConstrainedMethodUseLookupResult : GenericLookupResult { MethodDesc _constrainedMethod; TypeDesc _constraintType; bool _directCall; protected override int ClassCode => -1525377658; public ConstrainedMethodUseLookupResult(MethodDesc constrainedMethod, TypeDesc constraintType, bool directCall) { _constrainedMethod = constrainedMethod; _constraintType = constraintType; _directCall = directCall; Debug.Assert(_constraintType.IsRuntimeDeterminedSubtype || _constrainedMethod.IsRuntimeDeterminedExactMethod, "Concrete type in a generic dictionary?"); Debug.Assert(!_constrainedMethod.HasInstantiation || !_directCall, "Direct call to constrained generic method isn't supported"); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedConstrainedMethod = _constrainedMethod.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); TypeDesc instantiatedConstraintType = _constraintType.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); MethodDesc implMethod = instantiatedConstrainedMethod; if (implMethod.OwningType.IsInterface) { implMethod = instantiatedConstraintType.GetClosestDefType().ResolveVariantInterfaceMethodToVirtualMethodOnType(implMethod); } implMethod = instantiatedConstraintType.GetClosestDefType().FindVirtualFunctionTargetMethodOnObjectType(implMethod); // AOT use of this generic lookup is restricted to finding methods on valuetypes (runtime usage of this slot in universal generics is more flexible) Debug.Assert(instantiatedConstraintType.IsValueType); Debug.Assert(implMethod.OwningType == instantiatedConstraintType); if (implMethod.HasInstantiation) { return factory.ExactCallableAddress(implMethod); } else { return factory.CanonicalEntrypoint(implMethod); } } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { 
sb.Append("ConstrainedMethodUseLookupResult_"); sb.Append(nameMangler.GetMangledTypeName(_constraintType)); sb.Append(nameMangler.GetMangledMethodName(_constrainedMethod)); if (_directCall) sb.Append("Direct"); } public override string ToString() => $"ConstrainedMethodUseLookupResult: {_constraintType} {_constrainedMethod} {_directCall}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.ConstrainedMethodUse(_constrainedMethod, _constraintType, _directCall); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherResult = (ConstrainedMethodUseLookupResult)other; int result = (_directCall ? 1 : 0) - (otherResult._directCall ? 1 : 0); if (result != 0) return result; result = comparer.Compare(_constraintType, otherResult._constraintType); if (result != 0) return result; return comparer.Compare(_constrainedMethod, otherResult._constrainedMethod); } protected override int GetHashCodeImpl() { return _constrainedMethod.GetHashCode() * 13 + _constraintType.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { var other = (ConstrainedMethodUseLookupResult)obj; return _constrainedMethod == other._constrainedMethod && _constraintType == other._constraintType && _directCall == other._directCall; } } public sealed class IntegerLookupResult : GenericLookupResult { int _integerValue; public IntegerLookupResult(int integer) { _integerValue = integer; } public int IntegerValue => _integerValue; protected override int ClassCode => 385752509; public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { return null; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("IntegerLookupResult_").Append(_integerValue.ToString("x")); } public override string ToString() { return "IntegerLookupResult_" + _integerValue.ToString("x"); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { IntegerLookupResult lookupResultOther = (IntegerLookupResult)other; if (lookupResultOther._integerValue == _integerValue) return 0; return _integerValue > lookupResultOther._integerValue ? 
1 : -1; } protected override bool EqualsImpl(GenericLookupResult other) { IntegerLookupResult lookupResultOther = (IntegerLookupResult)other; return lookupResultOther._integerValue == _integerValue; } protected override int GetHashCodeImpl() { return _integerValue; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { builder.EmitNaturalInt(_integerValue); } public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.IntegerSlot(_integerValue); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteIntegerSlot(_integerValue); } } public sealed class PointerToSlotLookupResult : GenericLookupResult { int _slotIndex; public PointerToSlotLookupResult(int slotIndex) { _slotIndex = slotIndex; } public int SlotIndex => _slotIndex; protected override int ClassCode => 551050755; public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { return null; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("PointerToSlotLookupResult_").Append(_slotIndex.ToString("x")); } public override string ToString() { return "PointerToSlotLookupResult_" + _slotIndex.ToString("x"); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { PointerToSlotLookupResult pointerToSlotResultOther = (PointerToSlotLookupResult)other; if (pointerToSlotResultOther._slotIndex == _slotIndex) return 0; return _slotIndex > pointerToSlotResultOther._slotIndex ? 1 : -1; } protected override bool EqualsImpl(GenericLookupResult other) { PointerToSlotLookupResult pointerToSlotResultOther = (PointerToSlotLookupResult)other; return pointerToSlotResultOther._slotIndex == _slotIndex; } protected override int GetHashCodeImpl() { return _slotIndex; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { builder.EmitPointerReloc(dictionaryNode, _slotIndex * factory.Target.PointerSize); } public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.PointerToOtherSlot(_slotIndex); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // Under no circumstance should we attempt to write out a pointer to slot result throw new InvalidProgramException(); } } }
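As a rough illustration of the two raw-value slot kinds at the end of the file above: IntegerLookupResult emits the integer itself into its dictionary slot (EmitNaturalInt), while PointerToSlotLookupResult emits a relocation pointing _slotIndex * PointerSize bytes into the same dictionary. A simplified sketch assuming 64-bit pointers; the array-based writer is hypothetical and merely stands in for ObjectDataBuilder:

using System;

static class RawSlotSketch
{
    // "dictionaryBase" stands in for the address the dictionary symbol
    // receives at link time; "dictionary" models its pointer-sized slots.
    static void EmitRawSlots(long[] dictionary, long dictionaryBase,
                             int integerValue, int pointedToSlot, int firstSlot)
    {
        // IntegerLookupResult: the value is stored directly in the slot.
        dictionary[firstSlot] = integerValue;

        // PointerToSlotLookupResult: the slot holds the address of another
        // slot in the same dictionary (slot index times pointer size).
        dictionary[firstSlot + 1] = dictionaryBase + pointedToSlot * sizeof(long);
    }

    static void Main()
    {
        var dict = new long[4];
        EmitRawSlots(dict, dictionaryBase: 0x1000, integerValue: 42,
                     pointedToSlot: 3, firstSlot: 0);
        Console.WriteLine($"slot0={dict[0]}, slot1=0x{dict[1]:x}"); // 42, 0x1018
    }
}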
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.IL; using Internal.Text; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; namespace ILCompiler.DependencyAnalysis { public enum GenericLookupResultReferenceType { Direct, // The slot stores a direct pointer to the target Indirect, // The slot is an indirection cell which points to the direct pointer ConditionalIndirect, // The slot may be a direct pointer or an indirection cell, depending on the last digit } // Represents a generic lookup within a canonical method body. // TODO: unify with NativeFormat.FixupSignatureKind public enum LookupResultType { Invalid, MethodTable, // a type UnwrapNullable, // a type (The type T described by a type spec that is generic over Nullable<T>) NonGcStatic, // the non-gc statics of a type GcStatic, // the gc statics of a type Method, // a method InterfaceDispatchCell, // the dispatch cell for calling an interface method MethodDictionary, // a dictionary for calling a generic method UnboxingStub, // the unboxing stub for a method ArrayType, // an array of type DefaultCtor, // default ctor of a type AllocObject, // the allocator of a type GvmVtableOffset, // vtable offset of a generic virtual method ProfileCounter, // profiling counter cell MethodLdToken, // a ldtoken result for a method FieldLdToken, // a ldtoken result for a field Field, // a field descriptor IsInst, // isinst helper CastClass, // castclass helper AllocArray, // the array allocator of a type TypeSize, // size of the type FieldOffset, // field offset CallingConvention_NoInstParam, // CallingConventionConverterThunk NO_INSTANTIATING_PARAM CallingConvention_HasInstParam, // CallingConventionConverterThunk HAS_INSTANTIATING_PARAM CallingConvention_MaybeInstParam, // CallingConventionConverterThunk MAYBE_INSTANTIATING_PARAM VtableOffset, // Offset of a virtual method into the type's vtable Constrained, // ConstrainedCallDesc ConstrainedDirect, // Direct ConstrainedCallDesc Integer, // Integer UnboxingMethod, // UnboxingMethod } public interface IGenericLookupResultTocWriter { void WriteData(GenericLookupResultReferenceType referenceType, LookupResultType slotType, TypeSystemEntity context); void WriteIntegerSlot(int value); } public struct GenericLookupResultContext { private readonly TypeSystemEntity _canonicalOwner; public readonly Instantiation TypeInstantiation; public readonly Instantiation MethodInstantiation; public TypeSystemEntity Context { get { if (_canonicalOwner is TypeDesc) { var owningTypeDefinition = (MetadataType)((TypeDesc)_canonicalOwner).GetTypeDefinition(); Debug.Assert(owningTypeDefinition.Instantiation.Length == TypeInstantiation.Length); Debug.Assert(MethodInstantiation.IsNull || MethodInstantiation.Length == 0); return owningTypeDefinition.MakeInstantiatedType(TypeInstantiation); } Debug.Assert(_canonicalOwner is MethodDesc); MethodDesc owningMethodDefinition = ((MethodDesc)_canonicalOwner).GetTypicalMethodDefinition(); Debug.Assert(owningMethodDefinition.Instantiation.Length == MethodInstantiation.Length); MethodDesc concreteMethod = owningMethodDefinition; if (!TypeInstantiation.IsNull && TypeInstantiation.Length > 0) { TypeDesc owningType = owningMethodDefinition.OwningType; Debug.Assert(owningType.Instantiation.Length == TypeInstantiation.Length); concreteMethod = 
owningType.Context.GetMethodForInstantiatedType(owningMethodDefinition, ((MetadataType)owningType).MakeInstantiatedType(TypeInstantiation)); } else { Debug.Assert(owningMethodDefinition.OwningType.Instantiation.IsNull || owningMethodDefinition.OwningType.Instantiation.Length == 0); } return concreteMethod.MakeInstantiatedMethod(MethodInstantiation); } } public GenericLookupResultContext(TypeSystemEntity canonicalOwner, Instantiation typeInst, Instantiation methodInst) { _canonicalOwner = canonicalOwner; TypeInstantiation = typeInst; MethodInstantiation = methodInst; } } /// <summary> /// Represents the result of a generic lookup within a canonical method body. /// The concrete artifact the generic lookup will result in can only be determined after substituting /// runtime determined types with a concrete generic context. Use /// <see cref="GetTarget(NodeFactory, Instantiation, Instantiation, GenericDictionaryNode)"/> to obtain the concrete /// node the result points to. /// </summary> public abstract class GenericLookupResult { protected abstract int ClassCode { get; } public abstract ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary); public abstract void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb); public abstract override string ToString(); protected abstract int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer); protected abstract bool EqualsImpl(GenericLookupResult obj); protected abstract int GetHashCodeImpl(); public sealed override bool Equals(object obj) { GenericLookupResult other = obj as GenericLookupResult; if (obj == null) return false; return ClassCode == other.ClassCode && EqualsImpl(other); } public sealed override int GetHashCode() { return ClassCode * 31 + GetHashCodeImpl(); } public virtual void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { ISymbolNode target; try { target = GetTarget(factory, dictionary); } catch (TypeSystemException) { target = null; } if (target == null) { builder.EmitZeroPointer(); } else if (LookupResultReferenceType(factory) == GenericLookupResultReferenceType.ConditionalIndirect) { builder.EmitPointerRelocOrIndirectionReference(target); } else { builder.EmitPointerReloc(target); } } public virtual GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { return GenericLookupResultReferenceType.Direct; } public abstract NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory); public abstract void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer); // Call this api to get non-reloc dependencies that arise from use of a dictionary lookup public virtual IEnumerable<DependencyNodeCore<NodeFactory>> NonRelocDependenciesFromUsage(NodeFactory factory) { return Array.Empty<DependencyNodeCore<NodeFactory>>(); } public class Comparer : IComparer<GenericLookupResult> { private TypeSystemComparer _comparer; public Comparer(TypeSystemComparer comparer) { _comparer = comparer; } public int Compare(GenericLookupResult x, GenericLookupResult y) { if (x == y) { return 0; } int codeX = x.ClassCode; int codeY = y.ClassCode; if (codeX == codeY) { Debug.Assert(x.GetType() == y.GetType()); int result = x.CompareToImpl(y, _comparer); // We did a reference equality check above so an "Equal" result is not expected Debug.Assert(result != 0); return result; } else { Debug.Assert(x.GetType() != y.GetType()); return codeX > codeY ? 
-1 : 1; } } } } /// <summary> /// Generic lookup result that points to an MethodTable. /// </summary> public sealed class TypeHandleGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1623839081; public TypeHandleGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { // We are getting a maximally constructable type symbol because this might be something passed to newobj. TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); factory.TypeSystemContext.DetectGenericCycles(dictionary.Context, instantiatedType); return factory.MaximallyConstructableType(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("TypeHandle_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public TypeDesc Type => _type; public override string ToString() => $"TypeHandle: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.TypeHandleDictionarySlot(_type); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodTable, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeHandleGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeHandleGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an MethodTable where if the type is Nullable&lt;X&gt; the MethodTable is X /// </summary> public sealed class UnwrapNullableTypeHandleGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 53521918; public UnwrapNullableTypeHandleGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); // Unwrap the nullable type if necessary if (instantiatedType.IsNullable) instantiatedType = instantiatedType.Instantiation[0]; // We are getting a constructed type symbol because this might be something passed to newobj. 
return factory.ConstructedTypeSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("UnwrapNullable_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public TypeDesc Type => _type; public override string ToString() => $"UnwrapNullable: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.UnwrapNullableTypeDictionarySlot(_type); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.UnwrapNullable, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((UnwrapNullableTypeHandleGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((UnwrapNullableTypeHandleGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that puts a field offset into the generic dictionary. /// </summary> internal sealed class FieldOffsetGenericLookupResult : GenericLookupResult { private FieldDesc _field; protected override int ClassCode => -1670293557; public FieldOffsetGenericLookupResult(FieldDesc field) { Debug.Assert(field.OwningType.IsRuntimeDeterminedSubtype, "Concrete field in a generic dictionary?"); _field = field; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a FieldOffsetGenericLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { FieldDesc instantiatedField = _field.GetNonRuntimeDeterminedFieldFromRuntimeDeterminedFieldViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); int offset = instantiatedField.Offset.AsInt; builder.EmitNaturalInt(offset); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("FieldOffset_"); sb.Append(nameMangler.GetMangledFieldName(_field)); } public override string ToString() => $"FieldOffset: {_field}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.FieldOffsetDictionarySlot(_field); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.FieldOffset, _field); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_field, ((FieldOffsetGenericLookupResult)other)._field); } protected override int GetHashCodeImpl() { return _field.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((FieldOffsetGenericLookupResult)obj)._field == _field; } } /// <summary> /// Generic lookup result that puts a vtable offset into the generic dictionary. 
/// </summary> internal sealed class VTableOffsetGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 386794182; public VTableOffsetGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a VTableOffsetGenericLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { Debug.Fail("VTableOffset contents should only be generated into generic dictionaries at runtime"); builder.EmitNaturalInt(0); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("VTableOffset_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"VTableOffset: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.VTableOffsetDictionarySlot(_method); } public override IEnumerable<DependencyNodeCore<NodeFactory>> NonRelocDependenciesFromUsage(NodeFactory factory) { MethodDesc canonMethod = _method.GetCanonMethodTarget(CanonicalFormKind.Universal); // If we're producing a full vtable for the type, we don't need to report virtual method use. if (factory.VTable(canonMethod.OwningType).HasFixedSlots) return Array.Empty<DependencyNodeCore<NodeFactory>>(); return new DependencyNodeCore<NodeFactory>[] { factory.VirtualMethodUse(canonMethod) }; } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.VtableOffset, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((VTableOffsetGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((VTableOffsetGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to a RuntimeMethodHandle. 
/// </summary> internal sealed class MethodHandleGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 394272689; public MethodHandleGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.RuntimeMethodHandle(instantiatedMethod); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("MethodHandle_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"MethodHandle: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.MethodLdTokenDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodLdToken, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((MethodHandleGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodHandleGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to a RuntimeFieldHandle. /// </summary> internal sealed class FieldHandleGenericLookupResult : GenericLookupResult { private FieldDesc _field; protected override int ClassCode => -196995964; public FieldHandleGenericLookupResult(FieldDesc field) { Debug.Assert(field.OwningType.IsRuntimeDeterminedSubtype, "Concrete field in a generic dictionary?"); _field = field; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { FieldDesc instantiatedField = _field.GetNonRuntimeDeterminedFieldFromRuntimeDeterminedFieldViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.RuntimeFieldHandle(instantiatedField); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("FieldHandle_"); sb.Append(nameMangler.GetMangledFieldName(_field)); } public override string ToString() => $"FieldHandle: {_field}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.FieldLdTokenDictionarySlot(_field); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.FieldLdToken, _field); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_field, ((FieldHandleGenericLookupResult)other)._field); } protected override int GetHashCodeImpl() { return _field.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((FieldHandleGenericLookupResult)obj)._field == _field; } } /// <summary> /// Generic lookup result that points to a method dictionary. 
/// </summary> public sealed class MethodDictionaryGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => -467418176; public MethodDictionaryGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod, "Concrete method in a generic dictionary?"); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); factory.TypeSystemContext.DetectGenericCycles(dictionary.Context, instantiatedMethod); return factory.MethodGenericDictionary(instantiatedMethod); } public override GenericLookupResultReferenceType LookupResultReferenceType(NodeFactory factory) { if (factory.CompilationModuleGroup.CanHaveReferenceThroughImportTable) { return GenericLookupResultReferenceType.ConditionalIndirect; } else { return GenericLookupResultReferenceType.Direct; } } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("MethodDictionary_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public MethodDesc Method => _method; public override string ToString() => $"MethodDictionary: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.MethodDictionaryDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.MethodDictionary, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((MethodDictionaryGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodDictionaryGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that is a function pointer. 
/// </summary> internal sealed class MethodEntryGenericLookupResult : GenericLookupResult { private MethodDesc _method; private bool _isUnboxingThunk; protected override int ClassCode => 1572293098; public MethodEntryGenericLookupResult(MethodDesc method, bool isUnboxingThunk) { Debug.Assert(method.IsRuntimeDeterminedExactMethod); _method = method; _isUnboxingThunk = isUnboxingThunk; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.FatFunctionPointer(instantiatedMethod, _isUnboxingThunk); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { if (!_isUnboxingThunk) sb.Append("MethodEntry_"); else sb.Append("UnboxMethodEntry_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"MethodEntry: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { MethodDesc canonMethod = _method.GetCanonMethodTarget(CanonicalFormKind.Specific); // // For universal canonical methods, we don't need the unboxing stub really, because // the calling convention translation thunk will handle the unboxing (and we can avoid having a double thunk here) // We just need the flag in the native layout info signature indicating that we needed an unboxing stub // bool getUnboxingStubNode = _isUnboxingThunk && !canonMethod.IsCanonicalMethod(CanonicalFormKind.Universal); return factory.NativeLayout.MethodEntrypointDictionarySlot( _method, _isUnboxingThunk, factory.MethodEntrypoint(canonMethod, getUnboxingStubNode)); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { LookupResultType lookupResult = LookupResultType.Method; writer.WriteData(LookupResultReferenceType(factory), lookupResult, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherEntry = (MethodEntryGenericLookupResult)other; int result = (_isUnboxingThunk ? 1 : 0) - (otherEntry._isUnboxingThunk ? 1 : 0); if (result != 0) return result; return comparer.Compare(_method, otherEntry._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((MethodEntryGenericLookupResult)obj)._method == _method && ((MethodEntryGenericLookupResult)obj)._isUnboxingThunk == _isUnboxingThunk; } } /// <summary> /// Generic lookup result that points to a dispatch cell. /// </summary> internal sealed class VirtualDispatchCellGenericLookupResult : GenericLookupResult { private MethodDesc _method; protected override int ClassCode => 643566930; public VirtualDispatchCellGenericLookupResult(MethodDesc method) { Debug.Assert(method.IsRuntimeDeterminedExactMethod); Debug.Assert(method.IsVirtual); Debug.Assert(method.OwningType.IsInterface); _method = method; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext context) { MethodDesc instantiatedMethod = _method.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(context.TypeInstantiation, context.MethodInstantiation); TypeSystemEntity contextOwner = context.Context; GenericDictionaryNode dictionary = contextOwner is TypeDesc ? 
(GenericDictionaryNode)factory.TypeGenericDictionary((TypeDesc)contextOwner) : (GenericDictionaryNode)factory.MethodGenericDictionary((MethodDesc)contextOwner); return factory.InterfaceDispatchCell(instantiatedMethod, dictionary.GetMangledName(factory.NameMangler)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("DispatchCell_"); sb.Append(nameMangler.GetMangledMethodName(_method)); } public override string ToString() => $"DispatchCell: {_method}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.InterfaceCellDictionarySlot(_method); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.InterfaceDispatchCell, _method); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_method, ((VirtualDispatchCellGenericLookupResult)other)._method); } protected override int GetHashCodeImpl() { return _method.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((VirtualDispatchCellGenericLookupResult)obj)._method == _method; } } /// <summary> /// Generic lookup result that points to the non-GC static base of a type. /// </summary> internal sealed class TypeNonGCStaticBaseGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => -328863267; public TypeNonGCStaticBaseGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeNonGCStaticsSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("NonGCStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"NonGCStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.NonGcStaticDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.NonGcStatic, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeNonGCStaticBaseGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeNonGCStaticBaseGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to the threadstatic base index of a type. 
/// </summary> internal sealed class TypeThreadStaticBaseIndexGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => -177446371; public TypeThreadStaticBaseIndexGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeThreadStaticIndex(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("ThreadStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"ThreadStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.ThreadStaticBaseIndexDictionarySlotNode(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeThreadStaticBaseIndexGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeThreadStaticBaseIndexGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to the GC static base of a type. 
/// </summary> public sealed class TypeGCStaticBaseGenericLookupResult : GenericLookupResult { private MetadataType _type; protected override int ClassCode => 429225829; public TypeGCStaticBaseGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); Debug.Assert(type is MetadataType); _type = (MetadataType)type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { var instantiatedType = (MetadataType)_type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.TypeGCStaticsSymbol(instantiatedType); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("GCStaticBase_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public MetadataType Type => _type; public override string ToString() => $"GCStaticBase: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.GcStaticDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.GcStatic, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeGCStaticBaseGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeGCStaticBaseGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an object allocator. /// </summary> internal sealed class ObjectAllocatorGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -1671431655; public ObjectAllocatorGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetNewObjectHelperForType(instantiatedType)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("AllocObject_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"AllocObject: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.AllocateObjectDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.AllocObject, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((ObjectAllocatorGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((ObjectAllocatorGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an array allocator. 
/// </summary> internal sealed class ArrayAllocatorGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -927905284; public ArrayAllocatorGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); Debug.Assert(instantiatedType.IsArray); return factory.ExternSymbol(JitHelper.GetNewArrayHelperForType(instantiatedType)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("AllocArray_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"AllocArray: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.AllocateArrayDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.AllocArray, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((ArrayAllocatorGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((ArrayAllocatorGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an cast helper. /// </summary> internal sealed class CastClassGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1691016084; public CastClassGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetCastingHelperNameForType(instantiatedType, true)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("CastClass_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"CastClass: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.CastClassDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.CastClass, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((CastClassGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((CastClassGenericLookupResult)obj)._type == _type; } } /// <summary> /// Generic lookup result that points to an isInst helper. 
/// </summary> internal sealed class IsInstGenericLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => 1724059349; public IsInstGenericLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); return factory.ExternSymbol(JitHelper.GetCastingHelperNameForType(instantiatedType, false)); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("IsInst_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"IsInst: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.IsInstDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.IsInst, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((IsInstGenericLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((IsInstGenericLookupResult)obj)._type == _type; } } internal sealed class DefaultConstructorLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -1391112482; public DefaultConstructorLookupResult(TypeDesc type) { Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); _type = type; } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); MethodDesc defaultCtor = Compilation.GetConstructorForCreateInstanceIntrinsic(instantiatedType); return factory.CanonicalEntrypoint(defaultCtor); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("DefaultCtor_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"DefaultConstructor: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.DefaultConstructorDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.DefaultCtor, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((DefaultConstructorLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((DefaultConstructorLookupResult)obj)._type == _type; } } internal sealed class CallingConventionConverterLookupResult : GenericLookupResult { private CallingConventionConverterKey _callingConventionConverter; protected override int ClassCode => -581806472; public 
CallingConventionConverterLookupResult(CallingConventionConverterKey callingConventionConverter) { _callingConventionConverter = callingConventionConverter; Debug.Assert(Internal.Runtime.UniversalGenericParameterLayout.MethodSignatureHasVarsNeedingCallingConventionConverter(callingConventionConverter.Signature)); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a CallingConventionConverterLookupResult doesn't make sense. It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { Debug.Fail("CallingConventionConverterLookupResult contents should only be generated into generic dictionaries at runtime"); builder.EmitNaturalInt(0); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("CallingConventionConverterLookupResult_"); sb.Append(_callingConventionConverter.GetName()); } public override string ToString() => "CallingConventionConverterLookupResult"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.CallingConventionConverter(_callingConventionConverter); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherEntry = (CallingConventionConverterLookupResult)other; int result = (int)(_callingConventionConverter.ConverterKind - otherEntry._callingConventionConverter.ConverterKind); if (result != 0) return result; return comparer.Compare(_callingConventionConverter.Signature, otherEntry._callingConventionConverter.Signature); } protected override int GetHashCodeImpl() { return _callingConventionConverter.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((CallingConventionConverterLookupResult)obj)._callingConventionConverter.Equals(_callingConventionConverter); } } internal sealed class TypeSizeLookupResult : GenericLookupResult { private TypeDesc _type; protected override int ClassCode => -367755250; public TypeSizeLookupResult(TypeDesc type) { _type = type; Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete type in a generic dictionary?"); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { Debug.Fail("GetTarget for a TypeSizeLookupResult doesn't make sense. 
It isn't a pointer being emitted"); return null; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { TypeDesc instantiatedType = _type.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); int typeSize; if (_type.IsDefType) { typeSize = ((DefType)_type).InstanceFieldSize.AsInt; } else { typeSize = factory.TypeSystemContext.Target.PointerSize; } builder.EmitNaturalInt(typeSize); } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("TypeSize_"); sb.Append(nameMangler.GetMangledTypeName(_type)); } public override string ToString() => $"TypeSize: {_type}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.TypeSizeDictionarySlot(_type); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteData(LookupResultReferenceType(factory), LookupResultType.TypeSize, _type); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { return comparer.Compare(_type, ((TypeSizeLookupResult)other)._type); } protected override int GetHashCodeImpl() { return _type.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { return ((TypeSizeLookupResult)obj)._type == _type; } } internal sealed class ConstrainedMethodUseLookupResult : GenericLookupResult { MethodDesc _constrainedMethod; TypeDesc _constraintType; bool _directCall; protected override int ClassCode => -1525377658; public ConstrainedMethodUseLookupResult(MethodDesc constrainedMethod, TypeDesc constraintType, bool directCall) { _constrainedMethod = constrainedMethod; _constraintType = constraintType; _directCall = directCall; Debug.Assert(_constraintType.IsRuntimeDeterminedSubtype || _constrainedMethod.IsRuntimeDeterminedExactMethod, "Concrete type in a generic dictionary?"); Debug.Assert(!_constrainedMethod.HasInstantiation || !_directCall, "Direct call to constrained generic method isn't supported"); } public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { MethodDesc instantiatedConstrainedMethod = _constrainedMethod.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); TypeDesc instantiatedConstraintType = _constraintType.GetNonRuntimeDeterminedTypeFromRuntimeDeterminedSubtypeViaSubstitution(dictionary.TypeInstantiation, dictionary.MethodInstantiation); MethodDesc implMethod; if (instantiatedConstrainedMethod.OwningType.IsInterface) { if (instantiatedConstrainedMethod.Signature.IsStatic) { implMethod = instantiatedConstraintType.GetClosestDefType().ResolveVariantInterfaceMethodToStaticVirtualMethodOnType(instantiatedConstrainedMethod); } else { throw new NotImplementedException(); } } else { implMethod = instantiatedConstraintType.GetClosestDefType().FindVirtualFunctionTargetMethodOnObjectType(instantiatedConstrainedMethod); } // AOT use of this generic lookup is restricted to finding methods on valuetypes (runtime usage of this slot in universal generics is more flexible) Debug.Assert(instantiatedConstraintType.IsValueType || (instantiatedConstrainedMethod.OwningType.IsInterface && instantiatedConstrainedMethod.Signature.IsStatic)); Debug.Assert(!instantiatedConstraintType.IsValueType || 
implMethod.OwningType == instantiatedConstraintType); if (implMethod.Signature.IsStatic) { if (implMethod.GetCanonMethodTarget(CanonicalFormKind.Specific).IsSharedByGenericInstantiations) return factory.ExactCallableAddress(implMethod); else return factory.MethodEntrypoint(implMethod); } else if (implMethod.HasInstantiation) { return factory.ExactCallableAddress(implMethod); } else { return factory.CanonicalEntrypoint(implMethod); } } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("ConstrainedMethodUseLookupResult_"); sb.Append(nameMangler.GetMangledTypeName(_constraintType)); sb.Append(nameMangler.GetMangledMethodName(_constrainedMethod)); if (_directCall) sb.Append("Direct"); } public override string ToString() => $"ConstrainedMethodUseLookupResult: {_constraintType} {_constrainedMethod} {_directCall}"; public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.NotSupportedDictionarySlot; //return factory.NativeLayout.ConstrainedMethodUse(_constrainedMethod, _constraintType, _directCall); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // TODO throw new NotImplementedException(); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { var otherResult = (ConstrainedMethodUseLookupResult)other; int result = (_directCall ? 1 : 0) - (otherResult._directCall ? 1 : 0); if (result != 0) return result; result = comparer.Compare(_constraintType, otherResult._constraintType); if (result != 0) return result; return comparer.Compare(_constrainedMethod, otherResult._constrainedMethod); } protected override int GetHashCodeImpl() { return _constrainedMethod.GetHashCode() * 13 + _constraintType.GetHashCode(); } protected override bool EqualsImpl(GenericLookupResult obj) { var other = (ConstrainedMethodUseLookupResult)obj; return _constrainedMethod == other._constrainedMethod && _constraintType == other._constraintType && _directCall == other._directCall; } } public sealed class IntegerLookupResult : GenericLookupResult { int _integerValue; public IntegerLookupResult(int integer) { _integerValue = integer; } public int IntegerValue => _integerValue; protected override int ClassCode => 385752509; public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { return null; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("IntegerLookupResult_").Append(_integerValue.ToString("x")); } public override string ToString() { return "IntegerLookupResult_" + _integerValue.ToString("x"); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { IntegerLookupResult lookupResultOther = (IntegerLookupResult)other; if (lookupResultOther._integerValue == _integerValue) return 0; return _integerValue > lookupResultOther._integerValue ? 
1 : -1; } protected override bool EqualsImpl(GenericLookupResult other) { IntegerLookupResult lookupResultOther = (IntegerLookupResult)other; return lookupResultOther._integerValue == _integerValue; } protected override int GetHashCodeImpl() { return _integerValue; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { builder.EmitNaturalInt(_integerValue); } public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.IntegerSlot(_integerValue); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { writer.WriteIntegerSlot(_integerValue); } } public sealed class PointerToSlotLookupResult : GenericLookupResult { int _slotIndex; public PointerToSlotLookupResult(int slotIndex) { _slotIndex = slotIndex; } public int SlotIndex => _slotIndex; protected override int ClassCode => 551050755; public override ISymbolNode GetTarget(NodeFactory factory, GenericLookupResultContext dictionary) { return null; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("PointerToSlotLookupResult_").Append(_slotIndex.ToString("x")); } public override string ToString() { return "PointerToSlotLookupResult_" + _slotIndex.ToString("x"); } protected override int CompareToImpl(GenericLookupResult other, TypeSystemComparer comparer) { PointerToSlotLookupResult pointerToSlotResultOther = (PointerToSlotLookupResult)other; if (pointerToSlotResultOther._slotIndex == _slotIndex) return 0; return _slotIndex > pointerToSlotResultOther._slotIndex ? 1 : -1; } protected override bool EqualsImpl(GenericLookupResult other) { PointerToSlotLookupResult pointerToSlotResultOther = (PointerToSlotLookupResult)other; return pointerToSlotResultOther._slotIndex == _slotIndex; } protected override int GetHashCodeImpl() { return _slotIndex; } public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, GenericLookupResultContext dictionary, GenericDictionaryNode dictionaryNode) { builder.EmitPointerReloc(dictionaryNode, _slotIndex * factory.Target.PointerSize); } public override NativeLayoutVertexNode TemplateDictionaryNode(NodeFactory factory) { return factory.NativeLayout.PointerToOtherSlot(_slotIndex); } public override void WriteDictionaryTocData(NodeFactory factory, IGenericLookupResultTocWriter writer) { // Under no circumstance should we attempt to write out a pointer to slot result throw new InvalidProgramException(); } } }
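In this snapshot, `ConstrainedMethodUseLookupResult.GetTarget` resolves a static interface method through `ResolveVariantInterfaceMethodToStaticVirtualMethodOnType` and then picks among `ExactCallableAddress`, `MethodEntrypoint`, and `CanonicalEntrypoint` depending on whether the resolved implementation is static, shared across generic instantiations, or itself generic. A simplified, self-contained sketch of only that selection chain (plain booleans are assumptions standing in for the real `MethodDesc` queries):

```csharp
using System;

enum TargetKind { ExactCallableAddress, MethodEntrypoint, CanonicalEntrypoint }

class ConstrainedTargetSketch
{
    // Mirrors the if/else chain at the end of GetTarget; the real code asks
    // implMethod.Signature.IsStatic, IsSharedByGenericInstantiations, and HasInstantiation.
    static TargetKind Choose(bool isStatic, bool sharedByGenerics, bool hasInstantiation)
    {
        if (isStatic)
            return sharedByGenerics ? TargetKind.ExactCallableAddress : TargetKind.MethodEntrypoint;
        if (hasInstantiation)
            return TargetKind.ExactCallableAddress;
        return TargetKind.CanonicalEntrypoint;
    }

    static void Main()
    {
        // A shared static virtual implementation gets an exact callable address.
        Console.WriteLine(Choose(isStatic: true, sharedByGenerics: true, hasInstantiation: false));
    }
}
```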
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
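For context on the feature this PR compiles, here is a hedged sketch of the C# 11 surface syntax involved (the `IFromString` and `Celsius` types are hypothetical samples, not from the PR): a `static abstract` interface member invoked through a constrained generic call is the call shape that `ResolveConstraintMethodApprox` is being taught to resolve.

```csharp
using System;

// Hypothetical sample types; requires C# 11 (static abstract interface members).
interface IFromString<TSelf> where TSelf : IFromString<TSelf>
{
    static abstract TSelf Parse(string s);
}

struct Celsius : IFromString<Celsius>
{
    public double Value;
    public static Celsius Parse(string s) => new Celsius { Value = double.Parse(s) };
}

class Demo
{
    // T.Parse(s) compiles to a 'constrained.' call on a static virtual method;
    // resolving such call sites at AOT compile time is what this PR enables.
    static T ParseAny<T>(string s) where T : IFromString<T> => T.Parse(s);

    static void Main() => Console.WriteLine(ParseAny<Celsius>("21.5").Value);
}
```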
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/InterfaceDispatchMapNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.Text; using Internal.TypeSystem; using Internal.Runtime; namespace ILCompiler.DependencyAnalysis { public class InterfaceDispatchMapNode : ObjectNode, ISymbolDefinitionNode, ISortableSymbolNode { private readonly TypeDesc _type; public InterfaceDispatchMapNode(NodeFactory factory, TypeDesc type) { // Multidimensional arrays should not get a sealed vtable or a dispatch map. Runtime should use the // sealed vtable and dispatch map of the System.Array basetype instead. // Pointer arrays also follow the same path Debug.Assert(!type.IsArrayTypeWithoutGenericInterfaces()); Debug.Assert(MightHaveInterfaceDispatchMap(type, factory)); _type = type; } protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.CompilationUnitPrefix).Append("__InterfaceDispatchMap_").Append(nameMangler.SanitizeName(nameMangler.GetMangledTypeName(_type))); } public int Offset => 0; public override bool IsShareable => false; public override bool StaticDependenciesAreComputed => true; public override ObjectNodeSection Section { get { if (_type.Context.Target.IsWindows) return ObjectNodeSection.FoldableReadOnlyDataSection; else return ObjectNodeSection.DataSection; } } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { var result = new DependencyList(); result.Add(factory.InterfaceDispatchMapIndirection(_type), "Interface dispatch map indirection node"); // VTable slots of implemented interfaces are consulted during emission foreach (TypeDesc runtimeInterface in _type.RuntimeInterfaces) { result.Add(factory.VTable(runtimeInterface), "Interface for a dispatch map"); } return result; } /// <summary> /// Gets a value indicating whether '<paramref name="type"/>' might have a non-empty dispatch map. /// Note that this is only an approximation because we might not be able to take into account /// whether the interface methods are actually used. /// </summary> public static bool MightHaveInterfaceDispatchMap(TypeDesc type, NodeFactory factory) { if (type.IsArrayTypeWithoutGenericInterfaces()) return false; if (!type.IsArray && !type.IsDefType) return false; // Interfaces don't have a dispatch map because we dispatch them based on the // dispatch map of the implementing class. // The only exception are IDynamicInterfaceCastable scenarios that dispatch // using the interface dispatch map. // We generate the dispatch map irrespective of whether the interface actually // implements any methods (we don't run the for loop below) so that at runtime // we can distinguish between "the interface returned by IDynamicInterfaceCastable // wasn't marked as [DynamicInterfaceCastableImplementation]" and "we couldn't find an // implementation". We don't want to use the custom attribute for that at runtime because // that's reflection and this should work without reflection. 
if (type.IsInterface) return ((MetadataType)type).IsDynamicInterfaceCastableImplementation(); TypeDesc declType = type.GetClosestDefType(); for (int interfaceIndex = 0; interfaceIndex < declType.RuntimeInterfaces.Length; interfaceIndex++) { DefType interfaceType = declType.RuntimeInterfaces[interfaceIndex]; InstantiatedType interfaceOnDefinitionType = interfaceType.IsTypeDefinition ? null : (InstantiatedType)declType.GetTypeDefinition().RuntimeInterfaces[interfaceIndex]; IEnumerable<MethodDesc> slots; // If the vtable has fixed slots, we can query it directly. // If it's a lazily built vtable, we might not be able to query slots // just yet, so approximate by looking at all methods. VTableSliceNode vtableSlice = factory.VTable(interfaceType); if (vtableSlice.HasFixedSlots) slots = vtableSlice.Slots; else slots = interfaceType.GetAllVirtualMethods(); foreach (MethodDesc slotMethod in slots) { MethodDesc declMethod = slotMethod; Debug.Assert(!declMethod.Signature.IsStatic && declMethod.IsVirtual); if (interfaceOnDefinitionType != null) declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), interfaceOnDefinitionType); var implMethod = declType.GetTypeDefinition().ResolveInterfaceMethodToVirtualMethodOnType(declMethod); if (implMethod != null) { return true; } else { DefaultInterfaceMethodResolution result = declType.ResolveInterfaceMethodToDefaultImplementationOnType(slotMethod, out _); if (result != DefaultInterfaceMethodResolution.None) return true; } } } return false; } void EmitDispatchMap(ref ObjectDataBuilder builder, NodeFactory factory) { var entryCountReservation = builder.ReserveShort(); var defaultEntryCountReservation = builder.ReserveShort(); int entryCount = 0; TypeDesc declType = _type.GetClosestDefType(); TypeDesc declTypeDefinition = declType.GetTypeDefinition(); DefType[] declTypeRuntimeInterfaces = declType.RuntimeInterfaces; DefType[] declTypeDefinitionRuntimeInterfaces = declTypeDefinition.RuntimeInterfaces; // Catch any runtime interface collapsing. We shouldn't have any Debug.Assert(declTypeRuntimeInterfaces.Length == declTypeDefinitionRuntimeInterfaces.Length); var defaultImplementations = new List<(int InterfaceIndex, int InterfaceMethodSlot, int ImplMethodSlot)>(); // Resolve all the interfaces, but only emit non-default implementations for (int interfaceIndex = 0; interfaceIndex < declTypeRuntimeInterfaces.Length; interfaceIndex++) { var interfaceType = declTypeRuntimeInterfaces[interfaceIndex]; var interfaceDefinitionType = declTypeDefinitionRuntimeInterfaces[interfaceIndex]; Debug.Assert(interfaceType.IsInterface); IReadOnlyList<MethodDesc> virtualSlots = factory.VTable(interfaceType).Slots; for (int interfaceMethodSlot = 0; interfaceMethodSlot < virtualSlots.Count; interfaceMethodSlot++) { MethodDesc declMethod = virtualSlots[interfaceMethodSlot]; if(!interfaceType.IsTypeDefinition) declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceDefinitionType); var implMethod = declTypeDefinition.ResolveInterfaceMethodToVirtualMethodOnType(declMethod); // Interface methods first implemented by a base type in the hierarchy will return null for the implMethod (runtime interface // dispatch will walk the inheritance chain). 
if (implMethod != null) { TypeDesc implType = declType; while (!implType.HasSameTypeDefinition(implMethod.OwningType)) implType = implType.BaseType; MethodDesc targetMethod = implMethod; if (!implType.IsTypeDefinition) targetMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(implMethod.GetTypicalMethodDefinition(), (InstantiatedType)implType); builder.EmitShort((short)checked((ushort)interfaceIndex)); builder.EmitShort((short)checked((ushort)(interfaceMethodSlot + (interfaceType.HasGenericDictionarySlot() ? 1 : 0)))); builder.EmitShort((short)checked((ushort)VirtualMethodSlotHelper.GetVirtualMethodSlot(factory, targetMethod, declType))); entryCount++; } else { // Is there a default implementation? int? implSlot = null; DefaultInterfaceMethodResolution result = declTypeDefinition.ResolveInterfaceMethodToDefaultImplementationOnType(declMethod, out implMethod); if (result == DefaultInterfaceMethodResolution.DefaultImplementation) { DefType providingInterfaceDefinitionType = (DefType)implMethod.OwningType; implMethod = implMethod.InstantiateSignature(declType.Instantiation, Instantiation.Empty); implSlot = VirtualMethodSlotHelper.GetDefaultInterfaceMethodSlot(factory, implMethod, declType, providingInterfaceDefinitionType); } else if (result == DefaultInterfaceMethodResolution.Reabstraction) { implSlot = SpecialDispatchMapSlot.Reabstraction; } else if (result == DefaultInterfaceMethodResolution.Diamond) { implSlot = SpecialDispatchMapSlot.Diamond; } if (implSlot.HasValue) { defaultImplementations.Add(( interfaceIndex, interfaceMethodSlot + (interfaceType.HasGenericDictionarySlot() ? 1 : 0), implSlot.Value)); } } } } // Now emit the default implementations foreach (var defaultImplementation in defaultImplementations) { builder.EmitShort((short)checked((ushort)defaultImplementation.InterfaceIndex)); builder.EmitShort((short)checked((ushort)defaultImplementation.InterfaceMethodSlot)); builder.EmitShort((short)checked((ushort)defaultImplementation.ImplMethodSlot)); } // Update the header builder.EmitShort(entryCountReservation, (short)checked((ushort)entryCount)); builder.EmitShort(defaultEntryCountReservation, (short)checked((ushort)defaultImplementations.Count)); } public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { ObjectDataBuilder objData = new ObjectDataBuilder(factory, relocsOnly); objData.RequireInitialAlignment(2); objData.AddSymbol(this); if (!relocsOnly) { EmitDispatchMap(ref objData, factory); } return objData.ToObjectData(); } public override int ClassCode => 848664602; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { return comparer.Compare(_type, ((InterfaceDispatchMapNode)other)._type); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.Text; using Internal.TypeSystem; using Internal.Runtime; namespace ILCompiler.DependencyAnalysis { public class InterfaceDispatchMapNode : ObjectNode, ISymbolDefinitionNode, ISortableSymbolNode { private readonly TypeDesc _type; public InterfaceDispatchMapNode(NodeFactory factory, TypeDesc type) { // Multidimensional arrays should not get a sealed vtable or a dispatch map. Runtime should use the // sealed vtable and dispatch map of the System.Array basetype instead. // Pointer arrays also follow the same path Debug.Assert(!type.IsArrayTypeWithoutGenericInterfaces()); Debug.Assert(MightHaveInterfaceDispatchMap(type, factory)); _type = type; } protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.CompilationUnitPrefix).Append("__InterfaceDispatchMap_").Append(nameMangler.SanitizeName(nameMangler.GetMangledTypeName(_type))); } public int Offset => 0; public override bool IsShareable => false; public override bool StaticDependenciesAreComputed => true; public override ObjectNodeSection Section { get { if (_type.Context.Target.IsWindows) return ObjectNodeSection.FoldableReadOnlyDataSection; else return ObjectNodeSection.DataSection; } } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { var result = new DependencyList(); result.Add(factory.InterfaceDispatchMapIndirection(_type), "Interface dispatch map indirection node"); // VTable slots of implemented interfaces are consulted during emission foreach (TypeDesc runtimeInterface in _type.RuntimeInterfaces) { result.Add(factory.VTable(runtimeInterface), "Interface for a dispatch map"); } return result; } /// <summary> /// Gets a value indicating whether '<paramref name="type"/>' might have a non-empty dispatch map. /// Note that this is only an approximation because we might not be able to take into account /// whether the interface methods are actually used. /// </summary> public static bool MightHaveInterfaceDispatchMap(TypeDesc type, NodeFactory factory) { if (type.IsArrayTypeWithoutGenericInterfaces()) return false; if (!type.IsArray && !type.IsDefType) return false; // Interfaces don't have a dispatch map because we dispatch them based on the // dispatch map of the implementing class. // The only exception are IDynamicInterfaceCastable scenarios that dispatch // using the interface dispatch map. // We generate the dispatch map irrespective of whether the interface actually // implements any methods (we don't run the for loop below) so that at runtime // we can distinguish between "the interface returned by IDynamicInterfaceCastable // wasn't marked as [DynamicInterfaceCastableImplementation]" and "we couldn't find an // implementation". We don't want to use the custom attribute for that at runtime because // that's reflection and this should work without reflection. 
if (type.IsInterface) return ((MetadataType)type).IsDynamicInterfaceCastableImplementation(); TypeDesc declType = type.GetClosestDefType(); for (int interfaceIndex = 0; interfaceIndex < declType.RuntimeInterfaces.Length; interfaceIndex++) { DefType interfaceType = declType.RuntimeInterfaces[interfaceIndex]; InstantiatedType interfaceOnDefinitionType = interfaceType.IsTypeDefinition ? null : (InstantiatedType)declType.GetTypeDefinition().RuntimeInterfaces[interfaceIndex]; IEnumerable<MethodDesc> slots; // If the vtable has fixed slots, we can query it directly. // If it's a lazily built vtable, we might not be able to query slots // just yet, so approximate by looking at all methods. VTableSliceNode vtableSlice = factory.VTable(interfaceType); if (vtableSlice.HasFixedSlots) slots = vtableSlice.Slots; else slots = interfaceType.GetAllVirtualMethods(); foreach (MethodDesc slotMethod in slots) { // Static interface methods don't go in the dispatch map if (slotMethod.Signature.IsStatic) continue; MethodDesc declMethod = slotMethod; Debug.Assert(!declMethod.Signature.IsStatic && declMethod.IsVirtual); if (interfaceOnDefinitionType != null) declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), interfaceOnDefinitionType); var implMethod = declType.GetTypeDefinition().ResolveInterfaceMethodToVirtualMethodOnType(declMethod); if (implMethod != null) { return true; } else { DefaultInterfaceMethodResolution result = declType.ResolveInterfaceMethodToDefaultImplementationOnType(slotMethod, out _); if (result != DefaultInterfaceMethodResolution.None) return true; } } } return false; } void EmitDispatchMap(ref ObjectDataBuilder builder, NodeFactory factory) { var entryCountReservation = builder.ReserveShort(); var defaultEntryCountReservation = builder.ReserveShort(); int entryCount = 0; TypeDesc declType = _type.GetClosestDefType(); TypeDesc declTypeDefinition = declType.GetTypeDefinition(); DefType[] declTypeRuntimeInterfaces = declType.RuntimeInterfaces; DefType[] declTypeDefinitionRuntimeInterfaces = declTypeDefinition.RuntimeInterfaces; // Catch any runtime interface collapsing. We shouldn't have any Debug.Assert(declTypeRuntimeInterfaces.Length == declTypeDefinitionRuntimeInterfaces.Length); var defaultImplementations = new List<(int InterfaceIndex, int InterfaceMethodSlot, int ImplMethodSlot)>(); // Resolve all the interfaces, but only emit non-default implementations for (int interfaceIndex = 0; interfaceIndex < declTypeRuntimeInterfaces.Length; interfaceIndex++) { var interfaceType = declTypeRuntimeInterfaces[interfaceIndex]; var interfaceDefinitionType = declTypeDefinitionRuntimeInterfaces[interfaceIndex]; Debug.Assert(interfaceType.IsInterface); IReadOnlyList<MethodDesc> virtualSlots = factory.VTable(interfaceType).Slots; for (int interfaceMethodSlot = 0; interfaceMethodSlot < virtualSlots.Count; interfaceMethodSlot++) { MethodDesc declMethod = virtualSlots[interfaceMethodSlot]; if(!interfaceType.IsTypeDefinition) declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceDefinitionType); var implMethod = declTypeDefinition.ResolveInterfaceMethodToVirtualMethodOnType(declMethod); // Interface methods first implemented by a base type in the hierarchy will return null for the implMethod (runtime interface // dispatch will walk the inheritance chain). 
if (implMethod != null) { TypeDesc implType = declType; while (!implType.HasSameTypeDefinition(implMethod.OwningType)) implType = implType.BaseType; MethodDesc targetMethod = implMethod; if (!implType.IsTypeDefinition) targetMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(implMethod.GetTypicalMethodDefinition(), (InstantiatedType)implType); builder.EmitShort((short)checked((ushort)interfaceIndex)); builder.EmitShort((short)checked((ushort)(interfaceMethodSlot + (interfaceType.HasGenericDictionarySlot() ? 1 : 0)))); builder.EmitShort((short)checked((ushort)VirtualMethodSlotHelper.GetVirtualMethodSlot(factory, targetMethod, declType))); entryCount++; } else { // Is there a default implementation? int? implSlot = null; DefaultInterfaceMethodResolution result = declTypeDefinition.ResolveInterfaceMethodToDefaultImplementationOnType(declMethod, out implMethod); if (result == DefaultInterfaceMethodResolution.DefaultImplementation) { DefType providingInterfaceDefinitionType = (DefType)implMethod.OwningType; implMethod = implMethod.InstantiateSignature(declType.Instantiation, Instantiation.Empty); implSlot = VirtualMethodSlotHelper.GetDefaultInterfaceMethodSlot(factory, implMethod, declType, providingInterfaceDefinitionType); } else if (result == DefaultInterfaceMethodResolution.Reabstraction) { implSlot = SpecialDispatchMapSlot.Reabstraction; } else if (result == DefaultInterfaceMethodResolution.Diamond) { implSlot = SpecialDispatchMapSlot.Diamond; } if (implSlot.HasValue) { defaultImplementations.Add(( interfaceIndex, interfaceMethodSlot + (interfaceType.HasGenericDictionarySlot() ? 1 : 0), implSlot.Value)); } } } } // Now emit the default implementations foreach (var defaultImplementation in defaultImplementations) { builder.EmitShort((short)checked((ushort)defaultImplementation.InterfaceIndex)); builder.EmitShort((short)checked((ushort)defaultImplementation.InterfaceMethodSlot)); builder.EmitShort((short)checked((ushort)defaultImplementation.ImplMethodSlot)); } // Update the header builder.EmitShort(entryCountReservation, (short)checked((ushort)entryCount)); builder.EmitShort(defaultEntryCountReservation, (short)checked((ushort)defaultImplementations.Count)); } public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { ObjectDataBuilder objData = new ObjectDataBuilder(factory, relocsOnly); objData.RequireInitialAlignment(2); objData.AddSymbol(this); if (!relocsOnly) { EmitDispatchMap(ref objData, factory); } return objData.ToObjectData(); } public override int ClassCode => 848664602; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { return comparer.Compare(_type, ((InterfaceDispatchMapNode)other)._type); } } }
1
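The `slotMethod.Signature.IsStatic` skip added in the after-content above reflects that static abstract interface members never dispatch through the per-instance interface dispatch map; they are resolved via constrained calls instead. A minimal C# sketch of such a member, using hypothetical names (`IAdditive` and `Meters` are illustrations, not types from this PR; requires the static abstract interface members language feature this PR supports):

    // 'Zero' produces no dispatch-map entry (static slot, skipped above);
    // 'Add' gets a regular interface dispatch-map entry.
    public interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
    {
        static abstract TSelf Zero { get; }
        TSelf Add(TSelf other);
    }

    public readonly struct Meters : IAdditive<Meters>
    {
        public double Value { get; init; }
        public static Meters Zero => new Meters { Value = 0 };
        public Meters Add(Meters other) => new Meters { Value = Value + other.Value };
    }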
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReadyToRunGenericHelperNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.IL; using Internal.Text; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; namespace ILCompiler.DependencyAnalysis { public abstract partial class ReadyToRunGenericHelperNode : AssemblyStubNode, INodeWithRuntimeDeterminedDependencies { private readonly ReadyToRunHelperId _id; private readonly object _target; protected readonly TypeSystemEntity _dictionaryOwner; protected readonly GenericLookupResult _lookupSignature; // True if any of slots in dictionaries associated with this layout could not be filled // at compile time due to a TypeSystemException. Only query through HandlesInvalidEntries // below so that we can assert this is not queried at an inappropriate time before // the whole program view has been established. private bool _hasInvalidEntries; public ReadyToRunHelperId Id => _id; public Object Target => _target; public TypeSystemEntity DictionaryOwner => _dictionaryOwner; public GenericLookupResult LookupSignature => _lookupSignature; public bool HandlesInvalidEntries(NodeFactory factory) { Debug.Assert(factory.MarkingComplete); return _hasInvalidEntries; } public ReadyToRunGenericHelperNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) { _id = helperId; _dictionaryOwner = dictionaryOwner; _target = target; _lookupSignature = GetLookupSignature(factory, helperId, target); } public static GenericLookupResult GetLookupSignature(NodeFactory factory, ReadyToRunHelperId id, object target) { // Necessary type handle is not something you can put in a dictionary - someone should have normalized to TypeHandle Debug.Assert(id != ReadyToRunHelperId.NecessaryTypeHandle); switch (id) { case ReadyToRunHelperId.TypeHandle: return factory.GenericLookup.Type((TypeDesc)target); case ReadyToRunHelperId.TypeHandleForCasting: // Check that we unwrapped the cases that could be unwrapped to prevent duplicate entries Debug.Assert(factory.GenericLookup.Type((TypeDesc)target) != factory.GenericLookup.UnwrapNullableType((TypeDesc)target)); return factory.GenericLookup.UnwrapNullableType((TypeDesc)target); case ReadyToRunHelperId.MethodHandle: return factory.GenericLookup.MethodHandle((MethodDesc)target); case ReadyToRunHelperId.FieldHandle: return factory.GenericLookup.FieldHandle((FieldDesc)target); case ReadyToRunHelperId.GetGCStaticBase: return factory.GenericLookup.TypeGCStaticBase((TypeDesc)target); case ReadyToRunHelperId.GetNonGCStaticBase: return factory.GenericLookup.TypeNonGCStaticBase((TypeDesc)target); case ReadyToRunHelperId.GetThreadStaticBase: return factory.GenericLookup.TypeThreadStaticBaseIndex((TypeDesc)target); case ReadyToRunHelperId.MethodDictionary: return factory.GenericLookup.MethodDictionary((MethodDesc)target); case ReadyToRunHelperId.VirtualDispatchCell: return factory.GenericLookup.VirtualDispatchCell((MethodDesc)target); case ReadyToRunHelperId.MethodEntry: return factory.GenericLookup.MethodEntry((MethodDesc)target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)target).GetLookupKind(factory); case ReadyToRunHelperId.DefaultConstructor: return factory.GenericLookup.DefaultCtorLookupResult((TypeDesc)target); case ReadyToRunHelperId.ObjectAllocator: return factory.GenericLookup.ObjectAllocator((TypeDesc)target); default: throw new NotImplementedException(); } } 
protected override bool IsVisibleFromManagedCode => false; protected sealed override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public override bool IsShareable => true; protected sealed override void OnMarked(NodeFactory factory) { DictionaryLayoutNode layout = factory.GenericDictionaryLayout(_dictionaryOwner); if (layout.HasUnfixedSlots) { // When the helper call gets marked, ensure the generic layout for the associated dictionaries // includes the signature. layout.EnsureEntry(_lookupSignature); if ((_id == ReadyToRunHelperId.GetGCStaticBase || _id == ReadyToRunHelperId.GetThreadStaticBase) && factory.PreinitializationManager.HasLazyStaticConstructor((TypeDesc)_target)) { // If the type has a lazy static constructor, we also need the non-GC static base // because that's where the class constructor context is. layout.EnsureEntry(factory.GenericLookup.TypeNonGCStaticBase((TypeDesc)_target)); } } } public IEnumerable<DependencyListEntry> InstantiateDependencies(NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) { DependencyList result = new DependencyList(); var lookupContext = new GenericLookupResultContext(_dictionaryOwner, typeInstantiation, methodInstantiation); switch (_id) { case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: { // If the type has a lazy static constructor, we also need the non-GC static base // because that's where the class constructor context is. TypeDesc type = (TypeDesc)_target; if (factory.PreinitializationManager.HasLazyStaticConstructor(type)) { result.Add( new DependencyListEntry( factory.GenericLookup.TypeNonGCStaticBase(type).GetTarget(factory, lookupContext), "Dictionary dependency")); } } break; case ReadyToRunHelperId.DelegateCtor: { DelegateCreationInfo createInfo = (DelegateCreationInfo)_target; if (createInfo.NeedsVirtualMethodUseTracking) { MethodDesc instantiatedTargetMethod = createInfo.TargetMethod.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(typeInstantiation, methodInstantiation); if (!factory.VTable(instantiatedTargetMethod.OwningType).HasFixedSlots) { result.Add( new DependencyListEntry( factory.VirtualMethodUse(instantiatedTargetMethod), "Dictionary dependency")); } factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref result, factory, instantiatedTargetMethod); } } break; } try { // All generic lookups depend on the thing they point to result.Add(new DependencyListEntry( _lookupSignature.GetTarget(factory, lookupContext), "Dictionary dependency")); } catch (TypeSystemException) { // If there was an exception, we're going to generate a null slot in the associated // dictionary. The helper needs to be able to handle a null slot and tailcall // and exception throwing helper instead of returning a result. 
_hasInvalidEntries = true; result.Add(GetBadSlotHelper(factory), "Failure to build dictionary slot"); } return result.ToArray(); } private static IMethodNode GetBadSlotHelper(NodeFactory factory) { return factory.MethodEntrypoint(factory.TypeSystemContext.GetHelperEntryPoint("ThrowHelpers", "ThrowUnavailableType")); } protected void AppendLookupSignatureMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { if (_id != ReadyToRunHelperId.DelegateCtor) { _lookupSignature.AppendMangledName(nameMangler, sb); } else { ((DelegateCreationInfo)_target).AppendMangledName(nameMangler, sb); } } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { DependencyList dependencies = new DependencyList(); if (_dictionaryOwner is TypeDesc type) { // The generic lookup will need to consult the vtable of the owning type to find the // vtable slot where the generic dictionary is placed - report the dependency. dependencies.Add(factory.VTable(type), "Owning type vtable"); } dependencies.Add(factory.GenericDictionaryLayout(_dictionaryOwner), "Layout"); foreach (DependencyNodeCore<NodeFactory> dependency in _lookupSignature.NonRelocDependenciesFromUsage(factory)) { dependencies.Add(new DependencyListEntry(dependency, "GenericLookupResultDependency")); } return dependencies; } public override bool HasConditionalStaticDependencies => true; public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { List<CombinedDependencyListEntry> conditionalDependencies = new List<CombinedDependencyListEntry>(); NativeLayoutSavedVertexNode templateLayout; if (_dictionaryOwner is MethodDesc) { templateLayout = factory.NativeLayout.TemplateMethodLayout((MethodDesc)_dictionaryOwner); conditionalDependencies.Add(new CombinedDependencyListEntry(_lookupSignature.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } else { templateLayout = factory.NativeLayout.TemplateTypeLayout((TypeDesc)_dictionaryOwner); conditionalDependencies.Add(new CombinedDependencyListEntry(_lookupSignature.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } if (_id == ReadyToRunHelperId.GetGCStaticBase || _id == ReadyToRunHelperId.GetThreadStaticBase) { // If the type has a lazy static constructor, we also need the non-GC static base to be available as // a template dictionary node. 
TypeDesc type = (TypeDesc)_target; Debug.Assert(templateLayout != null); if (factory.PreinitializationManager.HasLazyStaticConstructor(type)) { GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(type); conditionalDependencies.Add(new CombinedDependencyListEntry(nonGcRegionLookup.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } } return conditionalDependencies; } public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { var compare = _id.CompareTo(((ReadyToRunGenericHelperNode)other)._id); if (compare != 0) return compare; if (_dictionaryOwner is MethodDesc) { if (((ReadyToRunGenericHelperNode)other)._dictionaryOwner is TypeDesc) return -1; compare = comparer.Compare((MethodDesc)_dictionaryOwner, (MethodDesc)((ReadyToRunGenericHelperNode)other)._dictionaryOwner); } else { if (((ReadyToRunGenericHelperNode)other)._dictionaryOwner is MethodDesc) return 1; compare = comparer.Compare((TypeDesc)_dictionaryOwner, (TypeDesc)((ReadyToRunGenericHelperNode)other)._dictionaryOwner); } if (compare != 0) return compare; switch (_id) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: return comparer.Compare((TypeDesc)_target, (TypeDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodEntry: return comparer.Compare((MethodDesc)_target, (MethodDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.FieldHandle: return comparer.Compare((FieldDesc)_target, (FieldDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)_target).CompareTo((DelegateCreationInfo)((ReadyToRunGenericHelperNode)other)._target, comparer); default: throw new NotImplementedException(); } } } public partial class ReadyToRunGenericLookupFromDictionaryNode : ReadyToRunGenericHelperNode { public ReadyToRunGenericLookupFromDictionaryNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) : base(factory, helperId, target, dictionaryOwner) { } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { Utf8String mangledContextName; if (_dictionaryOwner is MethodDesc) mangledContextName = nameMangler.GetMangledMethodName((MethodDesc)_dictionaryOwner); else mangledContextName = nameMangler.GetMangledTypeName((TypeDesc)_dictionaryOwner); sb.Append("__GenericLookupFromDict_").Append(mangledContextName).Append("_"); AppendLookupSignatureMangledName(nameMangler, sb); } public override int ClassCode => 1055354299; } public partial class ReadyToRunGenericLookupFromTypeNode : ReadyToRunGenericHelperNode { public ReadyToRunGenericLookupFromTypeNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) : base(factory, helperId, target, dictionaryOwner) { } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { Utf8String mangledContextName; if (_dictionaryOwner is MethodDesc) mangledContextName = nameMangler.GetMangledMethodName((MethodDesc)_dictionaryOwner); else mangledContextName = nameMangler.GetMangledTypeName((TypeDesc)_dictionaryOwner); 
sb.Append("__GenericLookupFromType_").Append(mangledContextName).Append("_"); AppendLookupSignatureMangledName(nameMangler, sb); } public override int ClassCode => 913214059; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using Internal.IL; using Internal.Text; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; namespace ILCompiler.DependencyAnalysis { public abstract partial class ReadyToRunGenericHelperNode : AssemblyStubNode, INodeWithRuntimeDeterminedDependencies { private readonly ReadyToRunHelperId _id; private readonly object _target; protected readonly TypeSystemEntity _dictionaryOwner; protected readonly GenericLookupResult _lookupSignature; // True if any of slots in dictionaries associated with this layout could not be filled // at compile time due to a TypeSystemException. Only query through HandlesInvalidEntries // below so that we can assert this is not queried at an inappropriate time before // the whole program view has been established. private bool _hasInvalidEntries; public ReadyToRunHelperId Id => _id; public Object Target => _target; public TypeSystemEntity DictionaryOwner => _dictionaryOwner; public GenericLookupResult LookupSignature => _lookupSignature; public bool HandlesInvalidEntries(NodeFactory factory) { Debug.Assert(factory.MarkingComplete); return _hasInvalidEntries; } public ReadyToRunGenericHelperNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) { _id = helperId; _dictionaryOwner = dictionaryOwner; _target = target; _lookupSignature = GetLookupSignature(factory, helperId, target); } public static GenericLookupResult GetLookupSignature(NodeFactory factory, ReadyToRunHelperId id, object target) { // Necessary type handle is not something you can put in a dictionary - someone should have normalized to TypeHandle Debug.Assert(id != ReadyToRunHelperId.NecessaryTypeHandle); switch (id) { case ReadyToRunHelperId.TypeHandle: return factory.GenericLookup.Type((TypeDesc)target); case ReadyToRunHelperId.TypeHandleForCasting: // Check that we unwrapped the cases that could be unwrapped to prevent duplicate entries Debug.Assert(factory.GenericLookup.Type((TypeDesc)target) != factory.GenericLookup.UnwrapNullableType((TypeDesc)target)); return factory.GenericLookup.UnwrapNullableType((TypeDesc)target); case ReadyToRunHelperId.MethodHandle: return factory.GenericLookup.MethodHandle((MethodDesc)target); case ReadyToRunHelperId.FieldHandle: return factory.GenericLookup.FieldHandle((FieldDesc)target); case ReadyToRunHelperId.GetGCStaticBase: return factory.GenericLookup.TypeGCStaticBase((TypeDesc)target); case ReadyToRunHelperId.GetNonGCStaticBase: return factory.GenericLookup.TypeNonGCStaticBase((TypeDesc)target); case ReadyToRunHelperId.GetThreadStaticBase: return factory.GenericLookup.TypeThreadStaticBaseIndex((TypeDesc)target); case ReadyToRunHelperId.MethodDictionary: return factory.GenericLookup.MethodDictionary((MethodDesc)target); case ReadyToRunHelperId.VirtualDispatchCell: return factory.GenericLookup.VirtualDispatchCell((MethodDesc)target); case ReadyToRunHelperId.MethodEntry: return factory.GenericLookup.MethodEntry((MethodDesc)target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)target).GetLookupKind(factory); case ReadyToRunHelperId.DefaultConstructor: return factory.GenericLookup.DefaultCtorLookupResult((TypeDesc)target); case ReadyToRunHelperId.ObjectAllocator: return factory.GenericLookup.ObjectAllocator((TypeDesc)target); case ReadyToRunHelperId.ConstrainedDirectCall: 
return factory.GenericLookup.ConstrainedMethodUse( ((ConstrainedCallInfo)target).Method, ((ConstrainedCallInfo)target).ConstrainedType, directCall: !((ConstrainedCallInfo)target).Method.HasInstantiation); default: throw new NotImplementedException(); } } protected override bool IsVisibleFromManagedCode => false; protected sealed override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public override bool IsShareable => true; protected sealed override void OnMarked(NodeFactory factory) { DictionaryLayoutNode layout = factory.GenericDictionaryLayout(_dictionaryOwner); if (layout.HasUnfixedSlots) { // When the helper call gets marked, ensure the generic layout for the associated dictionaries // includes the signature. layout.EnsureEntry(_lookupSignature); if ((_id == ReadyToRunHelperId.GetGCStaticBase || _id == ReadyToRunHelperId.GetThreadStaticBase) && factory.PreinitializationManager.HasLazyStaticConstructor((TypeDesc)_target)) { // If the type has a lazy static constructor, we also need the non-GC static base // because that's where the class constructor context is. layout.EnsureEntry(factory.GenericLookup.TypeNonGCStaticBase((TypeDesc)_target)); } } } public IEnumerable<DependencyListEntry> InstantiateDependencies(NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) { DependencyList result = new DependencyList(); var lookupContext = new GenericLookupResultContext(_dictionaryOwner, typeInstantiation, methodInstantiation); switch (_id) { case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: { // If the type has a lazy static constructor, we also need the non-GC static base // because that's where the class constructor context is. TypeDesc type = (TypeDesc)_target; if (factory.PreinitializationManager.HasLazyStaticConstructor(type)) { result.Add( new DependencyListEntry( factory.GenericLookup.TypeNonGCStaticBase(type).GetTarget(factory, lookupContext), "Dictionary dependency")); } } break; case ReadyToRunHelperId.DelegateCtor: { DelegateCreationInfo createInfo = (DelegateCreationInfo)_target; if (createInfo.NeedsVirtualMethodUseTracking) { MethodDesc instantiatedTargetMethod = createInfo.TargetMethod.GetNonRuntimeDeterminedMethodFromRuntimeDeterminedMethodViaSubstitution(typeInstantiation, methodInstantiation); if (!factory.VTable(instantiatedTargetMethod.OwningType).HasFixedSlots) { result.Add( new DependencyListEntry( factory.VirtualMethodUse(instantiatedTargetMethod), "Dictionary dependency")); } factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref result, factory, instantiatedTargetMethod); } } break; } try { // All generic lookups depend on the thing they point to result.Add(new DependencyListEntry( _lookupSignature.GetTarget(factory, lookupContext), "Dictionary dependency")); } catch (TypeSystemException) { // If there was an exception, we're going to generate a null slot in the associated // dictionary. The helper needs to be able to handle a null slot and tailcall // and exception throwing helper instead of returning a result. 
_hasInvalidEntries = true; result.Add(GetBadSlotHelper(factory), "Failure to build dictionary slot"); } return result.ToArray(); } private static IMethodNode GetBadSlotHelper(NodeFactory factory) { return factory.MethodEntrypoint(factory.TypeSystemContext.GetHelperEntryPoint("ThrowHelpers", "ThrowUnavailableType")); } protected void AppendLookupSignatureMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { if (_id != ReadyToRunHelperId.DelegateCtor) { _lookupSignature.AppendMangledName(nameMangler, sb); } else { ((DelegateCreationInfo)_target).AppendMangledName(nameMangler, sb); } } protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { DependencyList dependencies = new DependencyList(); if (_dictionaryOwner is TypeDesc type) { // The generic lookup will need to consult the vtable of the owning type to find the // vtable slot where the generic dictionary is placed - report the dependency. dependencies.Add(factory.VTable(type), "Owning type vtable"); } dependencies.Add(factory.GenericDictionaryLayout(_dictionaryOwner), "Layout"); foreach (DependencyNodeCore<NodeFactory> dependency in _lookupSignature.NonRelocDependenciesFromUsage(factory)) { dependencies.Add(new DependencyListEntry(dependency, "GenericLookupResultDependency")); } return dependencies; } public override bool HasConditionalStaticDependencies => true; public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { List<CombinedDependencyListEntry> conditionalDependencies = new List<CombinedDependencyListEntry>(); NativeLayoutSavedVertexNode templateLayout; if (_dictionaryOwner is MethodDesc) { templateLayout = factory.NativeLayout.TemplateMethodLayout((MethodDesc)_dictionaryOwner); conditionalDependencies.Add(new CombinedDependencyListEntry(_lookupSignature.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } else { templateLayout = factory.NativeLayout.TemplateTypeLayout((TypeDesc)_dictionaryOwner); conditionalDependencies.Add(new CombinedDependencyListEntry(_lookupSignature.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } if (_id == ReadyToRunHelperId.GetGCStaticBase || _id == ReadyToRunHelperId.GetThreadStaticBase) { // If the type has a lazy static constructor, we also need the non-GC static base to be available as // a template dictionary node. 
TypeDesc type = (TypeDesc)_target; Debug.Assert(templateLayout != null); if (factory.PreinitializationManager.HasLazyStaticConstructor(type)) { GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(type); conditionalDependencies.Add(new CombinedDependencyListEntry(nonGcRegionLookup.TemplateDictionaryNode(factory), templateLayout, "Type loader template")); } } return conditionalDependencies; } public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { var compare = _id.CompareTo(((ReadyToRunGenericHelperNode)other)._id); if (compare != 0) return compare; if (_dictionaryOwner is MethodDesc) { if (((ReadyToRunGenericHelperNode)other)._dictionaryOwner is TypeDesc) return -1; compare = comparer.Compare((MethodDesc)_dictionaryOwner, (MethodDesc)((ReadyToRunGenericHelperNode)other)._dictionaryOwner); } else { if (((ReadyToRunGenericHelperNode)other)._dictionaryOwner is MethodDesc) return 1; compare = comparer.Compare((TypeDesc)_dictionaryOwner, (TypeDesc)((ReadyToRunGenericHelperNode)other)._dictionaryOwner); } if (compare != 0) return compare; switch (_id) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: return comparer.Compare((TypeDesc)_target, (TypeDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodEntry: return comparer.Compare((MethodDesc)_target, (MethodDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.FieldHandle: return comparer.Compare((FieldDesc)_target, (FieldDesc)((ReadyToRunGenericHelperNode)other)._target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)_target).CompareTo((DelegateCreationInfo)((ReadyToRunGenericHelperNode)other)._target, comparer); default: throw new NotImplementedException(); } } } public partial class ReadyToRunGenericLookupFromDictionaryNode : ReadyToRunGenericHelperNode { public ReadyToRunGenericLookupFromDictionaryNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) : base(factory, helperId, target, dictionaryOwner) { } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { Utf8String mangledContextName; if (_dictionaryOwner is MethodDesc) mangledContextName = nameMangler.GetMangledMethodName((MethodDesc)_dictionaryOwner); else mangledContextName = nameMangler.GetMangledTypeName((TypeDesc)_dictionaryOwner); sb.Append("__GenericLookupFromDict_").Append(mangledContextName).Append("_"); AppendLookupSignatureMangledName(nameMangler, sb); } public override int ClassCode => 1055354299; } public partial class ReadyToRunGenericLookupFromTypeNode : ReadyToRunGenericHelperNode { public ReadyToRunGenericLookupFromTypeNode(NodeFactory factory, ReadyToRunHelperId helperId, object target, TypeSystemEntity dictionaryOwner) : base(factory, helperId, target, dictionaryOwner) { } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { Utf8String mangledContextName; if (_dictionaryOwner is MethodDesc) mangledContextName = nameMangler.GetMangledMethodName((MethodDesc)_dictionaryOwner); else mangledContextName = nameMangler.GetMangledTypeName((TypeDesc)_dictionaryOwner); 
sb.Append("__GenericLookupFromType_").Append(mangledContextName).Append("_"); AppendLookupSignatureMangledName(nameMangler, sb); } public override int ClassCode => 913214059; } }
1
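The `ConstrainedDirectCall` case added to `GetLookupSignature` in the after-content above builds a `ConstrainedMethodUse` lookup, marking it a direct call only when the target method has no instantiation of its own. A sketch of the kind of call site this serves, reusing the hypothetical `IAdditive<TSelf>` from the earlier sketch (illustrative only):

    public static class Arithmetic
    {
        // In shared generic code the identity of T.Zero is unknown at compile
        // time, so the compiler resolves it through the generic dictionary
        // (the ConstrainedMethodUse lookup built in the case handler above).
        public static T SumOf<T>(T a, T b) where T : IAdditive<T>
        {
            T acc = T.Zero;           // static virtual: 'constrained. T' call
            return acc.Add(a).Add(b); // instance method: ordinary dispatch
        }
    }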
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReadyToRunHelperNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Internal.Text; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { public enum ReadyToRunHelperId { Invalid, NewHelper, NewArr1, VirtualCall, IsInstanceOf, CastClass, GetNonGCStaticBase, GetGCStaticBase, GetThreadStaticBase, GetThreadNonGcStaticBase, DelegateCtor, ResolveVirtualFunction, CctorTrigger, // The following helpers are used for generic lookups only TypeHandle, NecessaryTypeHandle, DeclaringTypeHandle, MethodHandle, FieldHandle, MethodDictionary, TypeDictionary, MethodEntry, VirtualDispatchCell, DefaultConstructor, TypeHandleForCasting, ObjectAllocator, } public partial class ReadyToRunHelperNode : AssemblyStubNode, INodeWithDebugInfo { private readonly ReadyToRunHelperId _id; private readonly Object _target; public ReadyToRunHelperNode(ReadyToRunHelperId id, Object target) { _id = id; _target = target; switch (id) { case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: { // Make sure we can compute static field layout now so we can fail early DefType defType = (DefType)target; defType.ComputeStaticFieldLayout(StaticLayoutKind.StaticRegionSizesAndFields); } break; case ReadyToRunHelperId.VirtualCall: case ReadyToRunHelperId.ResolveVirtualFunction: { // Make sure we aren't trying to callvirt Object.Finalize MethodDesc method = (MethodDesc)target; if (method.IsFinalizer) ThrowHelper.ThrowInvalidProgramException(ExceptionStringID.InvalidProgramCallVirtFinalize, method); // Method should be in fully canonical form. Otherwise we're being wasteful and generate more // helpers than needed. 
Debug.Assert(!method.IsCanonicalMethod(CanonicalFormKind.Any) || method.GetCanonMethodTarget(CanonicalFormKind.Specific) == method); } break; } } protected override bool IsVisibleFromManagedCode => false; protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public ReadyToRunHelperId Id => _id; public Object Target => _target; public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { switch (_id) { case ReadyToRunHelperId.VirtualCall: sb.Append("__VirtualCall_").Append(nameMangler.GetMangledMethodName((MethodDesc)_target)); break; case ReadyToRunHelperId.GetNonGCStaticBase: sb.Append("__GetNonGCStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.GetGCStaticBase: sb.Append("__GetGCStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.GetThreadStaticBase: sb.Append("__GetThreadStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.DelegateCtor: ((DelegateCreationInfo)_target).AppendMangledName(nameMangler, sb); break; case ReadyToRunHelperId.ResolveVirtualFunction: sb.Append("__ResolveVirtualFunction_"); sb.Append(nameMangler.GetMangledMethodName((MethodDesc)_target)); break; default: throw new NotImplementedException(); } } public bool IsStateMachineMoveNextMethod => false; public override bool IsShareable => true; protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { if (_id == ReadyToRunHelperId.VirtualCall || _id == ReadyToRunHelperId.ResolveVirtualFunction) { var targetMethod = (MethodDesc)_target; DependencyList dependencyList = new DependencyList(); #if !SUPPORT_JIT factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref dependencyList, factory, targetMethod); if (!factory.VTable(targetMethod.OwningType).HasFixedSlots) { dependencyList.Add(factory.VirtualMethodUse((MethodDesc)_target), "ReadyToRun Virtual Method Call"); } #endif return dependencyList; } else if (_id == ReadyToRunHelperId.DelegateCtor) { var info = (DelegateCreationInfo)_target; if (info.NeedsVirtualMethodUseTracking) { MethodDesc targetMethod = info.TargetMethod; DependencyList dependencyList = new DependencyList(); #if !SUPPORT_JIT factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref dependencyList, factory, targetMethod); if (!factory.VTable(info.TargetMethod.OwningType).HasFixedSlots) { dependencyList.Add(factory.VirtualMethodUse(info.TargetMethod), "ReadyToRun Delegate to virtual method"); } #endif return dependencyList; } } return null; } IEnumerable<NativeSequencePoint> INodeWithDebugInfo.GetNativeSequencePoints() { if (_id == ReadyToRunHelperId.VirtualCall) { // Generate debug information that lets debuggers step into the virtual calls. // We generate a step into sequence point at the point where the helper jumps to // the target of the virtual call. 
TargetDetails target = ((MethodDesc)_target).Context.Target; int debuggerStepInOffset = -1; switch (target.Architecture) { case TargetArchitecture.X64: debuggerStepInOffset = 3; break; } if (debuggerStepInOffset != -1) { return new NativeSequencePoint[] { new NativeSequencePoint(0, String.Empty, WellKnownLineNumber.DebuggerStepThrough), new NativeSequencePoint(debuggerStepInOffset, String.Empty, WellKnownLineNumber.DebuggerStepIn) }; } } return Array.Empty<NativeSequencePoint>(); } IEnumerable<DebugVarInfoMetadata> INodeWithDebugInfo.GetDebugVars() { return Array.Empty<DebugVarInfoMetadata>(); } #if !SUPPORT_JIT public override int ClassCode => -911637948; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { var compare = _id.CompareTo(((ReadyToRunHelperNode)other)._id); if (compare != 0) return compare; switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: return comparer.Compare((TypeDesc)_target, (TypeDesc)((ReadyToRunHelperNode)other)._target); case ReadyToRunHelperId.VirtualCall: case ReadyToRunHelperId.ResolveVirtualFunction: return comparer.Compare((MethodDesc)_target, (MethodDesc)((ReadyToRunHelperNode)other)._target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)_target).CompareTo((DelegateCreationInfo)((ReadyToRunHelperNode)other)._target, comparer); default: throw new NotImplementedException(); } } #endif } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Internal.Text; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { public enum ReadyToRunHelperId { Invalid, NewHelper, NewArr1, VirtualCall, IsInstanceOf, CastClass, GetNonGCStaticBase, GetGCStaticBase, GetThreadStaticBase, GetThreadNonGcStaticBase, DelegateCtor, ResolveVirtualFunction, CctorTrigger, // The following helpers are used for generic lookups only TypeHandle, NecessaryTypeHandle, DeclaringTypeHandle, MethodHandle, FieldHandle, MethodDictionary, TypeDictionary, MethodEntry, VirtualDispatchCell, DefaultConstructor, TypeHandleForCasting, ObjectAllocator, ConstrainedDirectCall, } public partial class ReadyToRunHelperNode : AssemblyStubNode, INodeWithDebugInfo { private readonly ReadyToRunHelperId _id; private readonly Object _target; public ReadyToRunHelperNode(ReadyToRunHelperId id, Object target) { _id = id; _target = target; switch (id) { case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: { // Make sure we can compute static field layout now so we can fail early DefType defType = (DefType)target; defType.ComputeStaticFieldLayout(StaticLayoutKind.StaticRegionSizesAndFields); } break; case ReadyToRunHelperId.VirtualCall: case ReadyToRunHelperId.ResolveVirtualFunction: { // Make sure we aren't trying to callvirt Object.Finalize MethodDesc method = (MethodDesc)target; if (method.IsFinalizer) ThrowHelper.ThrowInvalidProgramException(ExceptionStringID.InvalidProgramCallVirtFinalize, method); // Method should be in fully canonical form. Otherwise we're being wasteful and generate more // helpers than needed. 
Debug.Assert(!method.IsCanonicalMethod(CanonicalFormKind.Any) || method.GetCanonMethodTarget(CanonicalFormKind.Specific) == method); } break; } } protected override bool IsVisibleFromManagedCode => false; protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler); public ReadyToRunHelperId Id => _id; public Object Target => _target; public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { switch (_id) { case ReadyToRunHelperId.VirtualCall: sb.Append("__VirtualCall_").Append(nameMangler.GetMangledMethodName((MethodDesc)_target)); break; case ReadyToRunHelperId.GetNonGCStaticBase: sb.Append("__GetNonGCStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.GetGCStaticBase: sb.Append("__GetGCStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.GetThreadStaticBase: sb.Append("__GetThreadStaticBase_").Append(nameMangler.GetMangledTypeName((TypeDesc)_target)); break; case ReadyToRunHelperId.DelegateCtor: ((DelegateCreationInfo)_target).AppendMangledName(nameMangler, sb); break; case ReadyToRunHelperId.ResolveVirtualFunction: sb.Append("__ResolveVirtualFunction_"); sb.Append(nameMangler.GetMangledMethodName((MethodDesc)_target)); break; default: throw new NotImplementedException(); } } public bool IsStateMachineMoveNextMethod => false; public override bool IsShareable => true; protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) { if (_id == ReadyToRunHelperId.VirtualCall || _id == ReadyToRunHelperId.ResolveVirtualFunction) { var targetMethod = (MethodDesc)_target; DependencyList dependencyList = new DependencyList(); #if !SUPPORT_JIT factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref dependencyList, factory, targetMethod); if (!factory.VTable(targetMethod.OwningType).HasFixedSlots) { dependencyList.Add(factory.VirtualMethodUse((MethodDesc)_target), "ReadyToRun Virtual Method Call"); } #endif return dependencyList; } else if (_id == ReadyToRunHelperId.DelegateCtor) { var info = (DelegateCreationInfo)_target; if (info.NeedsVirtualMethodUseTracking) { MethodDesc targetMethod = info.TargetMethod; DependencyList dependencyList = new DependencyList(); #if !SUPPORT_JIT factory.MetadataManager.GetDependenciesDueToVirtualMethodReflectability(ref dependencyList, factory, targetMethod); if (!factory.VTable(info.TargetMethod.OwningType).HasFixedSlots) { dependencyList.Add(factory.VirtualMethodUse(info.TargetMethod), "ReadyToRun Delegate to virtual method"); } #endif return dependencyList; } } return null; } IEnumerable<NativeSequencePoint> INodeWithDebugInfo.GetNativeSequencePoints() { if (_id == ReadyToRunHelperId.VirtualCall) { // Generate debug information that lets debuggers step into the virtual calls. // We generate a step into sequence point at the point where the helper jumps to // the target of the virtual call. 
TargetDetails target = ((MethodDesc)_target).Context.Target; int debuggerStepInOffset = -1; switch (target.Architecture) { case TargetArchitecture.X64: debuggerStepInOffset = 3; break; } if (debuggerStepInOffset != -1) { return new NativeSequencePoint[] { new NativeSequencePoint(0, String.Empty, WellKnownLineNumber.DebuggerStepThrough), new NativeSequencePoint(debuggerStepInOffset, String.Empty, WellKnownLineNumber.DebuggerStepIn) }; } } return Array.Empty<NativeSequencePoint>(); } IEnumerable<DebugVarInfoMetadata> INodeWithDebugInfo.GetDebugVars() { return Array.Empty<DebugVarInfoMetadata>(); } #if !SUPPORT_JIT public override int ClassCode => -911637948; public override int CompareToImpl(ISortableNode other, CompilerComparer comparer) { var compare = _id.CompareTo(((ReadyToRunHelperNode)other)._id); if (compare != 0) return compare; switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: case ReadyToRunHelperId.GetGCStaticBase: case ReadyToRunHelperId.GetThreadStaticBase: return comparer.Compare((TypeDesc)_target, (TypeDesc)((ReadyToRunHelperNode)other)._target); case ReadyToRunHelperId.VirtualCall: case ReadyToRunHelperId.ResolveVirtualFunction: return comparer.Compare((MethodDesc)_target, (MethodDesc)((ReadyToRunHelperNode)other)._target); case ReadyToRunHelperId.DelegateCtor: return ((DelegateCreationInfo)_target).CompareTo((DelegateCreationInfo)((ReadyToRunHelperNode)other)._target, comparer); default: throw new NotImplementedException(); } } #endif } }
1
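The only change this row records is the new `ConstrainedDirectCall` member of `ReadyToRunHelperId`; the lookup it maps to lives in `ReadyToRunGenericHelperNode.GetLookupSignature`, shown in the previous row. A hedged sketch of how a compiler-side caller might request it — `ConstrainedCallInfo`'s constructor argument order is an assumption, since only its `Method`/`ConstrainedType` properties appear in the case handler:

    // Namespaces assumed: ILCompiler.DependencyAnalysis, Internal.TypeSystem.
    static class ConstrainedLookupSketch
    {
        static GenericLookupResult GetConstrainedCallLookup(
            NodeFactory factory, MethodDesc method, TypeDesc constrainedType)
        {
            // Constructor shape is an assumption for illustration only.
            var callInfo = new ConstrainedCallInfo(constrainedType, method);
            return ReadyToRunGenericHelperNode.GetLookupSignature(
                factory, ReadyToRunHelperId.ConstrainedDirectCall, callInfo);
        }
    }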
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_ARM/ARMReadyToRunGenericHelperNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.ARM; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ ARMEmitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref ARMEmitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell encoder.EmitLDR(result, context, dictionarySlot * factory.Target.PointerSize); switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection encoder.EmitLDR(result, result); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0, ((short)(factory.Target.PointerSize - cctorContextSize))); encoder.EmitCMP(encoder.TargetRegister.Arg1, ((byte)1)); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitSUB(encoder.TargetRegister.Arg0, ((byte)(cctorContextSize))); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitLDR(encoder.TargetRegister.Result, encoder.TargetRegister.Result); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. 
GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitLDR(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg2, ((short)(factory.Target.PointerSize - cctorContextSize))); encoder.EmitCMP(encoder.TargetRegister.Arg3, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2); encoder.EmitSUB(encoder.TargetRegister.Arg0, cctorContextSize); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitSUB(encoder.TargetRegister.Arg2, cctorContextSize); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). encoder.EmitLDR(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1); // Second arg: index of the type in the ThreadStatic section of the modules encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg1, factory.Target.PointerSize); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. 
Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitMOV(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. } } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable encoder.EmitLDR(contextRegister, contextRegister, slotOffset); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.ARM; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ ARMEmitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref ARMEmitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell encoder.EmitLDR(result, context, dictionarySlot * factory.Target.PointerSize); switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection encoder.EmitLDR(result, result); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0, ((short)(factory.Target.PointerSize - cctorContextSize))); encoder.EmitCMP(encoder.TargetRegister.Arg1, ((byte)1)); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitSUB(encoder.TargetRegister.Arg0, ((byte)(cctorContextSize))); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitLDR(encoder.TargetRegister.Result, encoder.TargetRegister.Result); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. 
GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitLDR(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg2, ((short)(factory.Target.PointerSize - cctorContextSize))); encoder.EmitCMP(encoder.TargetRegister.Arg3, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2); encoder.EmitSUB(encoder.TargetRegister.Arg0, cctorContextSize); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitSUB(encoder.TargetRegister.Arg2, cctorContextSize); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). encoder.EmitLDR(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1); // Second arg: index of the type in the ThreadStatic section of the modules encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg1, factory.Target.PointerSize); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. 
Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitMOV(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ConstrainedDirectCall: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. } } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref ARMEmitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable encoder.EmitLDR(contextRegister, contextRegister, slotOffset); } } }
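The functional delta between the before and after listings above is a single added case label: ConstrainedDirectCall now reuses the existing "simple" dictionary-lookup path. Excerpted from the after_content (not new code), the relevant arm of the switch:

// Excerpt of the changed switch arm, as it appears in the listing above.
case ReadyToRunHelperId.TypeHandleForCasting:
case ReadyToRunHelperId.ConstrainedDirectCall:   // added by this PR
    {
        EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly);
        encoder.EmitRET();
    }
    break;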
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
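For context, a minimal sketch of the C# 11 feature this PR wires up: static virtual (static abstract) interface members. The types and names below are hypothetical illustrations, not code from the PR:

// Hypothetical types illustrating static abstract interface members (C# 11).
interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
{
    static abstract TSelf Zero { get; }
    static abstract TSelf Add(TSelf left, TSelf right);
}

sealed class Accumulator : IAdditive<Accumulator>
{
    public double Value;
    public static Accumulator Zero => new Accumulator();
    public static Accumulator Add(Accumulator l, Accumulator r)
        => new Accumulator { Value = l.Value + r.Value };
}

static class Summation
{
    // For reference-type instantiations, generic code is shared (T == __Canon),
    // so the target of T.Add cannot be resolved at compile time; the AOT
    // compiler resolves it through a generic dictionary entry instead.
    public static T Sum<T>(T[] items) where T : IAdditive<T>
    {
        T acc = T.Zero;
        foreach (T item in items)
            acc = T.Add(acc, item);
        return acc;
    }
}

Per the PR description, constrained calls of this shape in shared code are what the repurposed ConstrainedMethodUseLookupResult / ConstrainedDirectCall dictionary entry resolves.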
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_ARM64/ARM64ReadyToRunGenericHelperNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.ARM64; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ ARM64Emitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref ARM64Emitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell encoder.EmitLDR(result, context, dictionarySlot * factory.Target.PointerSize); switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection encoder.EmitLDR(result, result); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. encoder.EmitSUB(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg0, NonGCStaticsNode.GetClassConstructorContextSize(factory.Target)); encoder.EmitLDR(encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg3, (short)factory.Target.PointerSize); encoder.EmitCMP(encoder.TargetRegister.Arg2, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg3); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitLDR(encoder.TargetRegister.Result, encoder.TargetRegister.Result); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. 
It is stored at the beginning of the non-GC statics region. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); encoder.EmitSUB(encoder.TargetRegister.Arg2, NonGCStaticsNode.GetClassConstructorContextSize(factory.Target)); encoder.EmitLDR(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg2, (short)factory.Target.PointerSize); encoder.EmitCMP(encoder.TargetRegister.Arg3, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitSUB(encoder.TargetRegister.Arg2, cctorContextSize); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). encoder.EmitLDR(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1); // Second arg: index of the type in the ThreadStatic section of the modules encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg1, factory.Target.PointerSize); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. 
Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitMOV(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. } } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable encoder.EmitLDR(contextRegister, contextRegister, slotOffset); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.ARM64; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ ARM64Emitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref ARM64Emitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell encoder.EmitLDR(result, context, dictionarySlot * factory.Target.PointerSize); switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection encoder.EmitLDR(result, result); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. encoder.EmitSUB(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg0, NonGCStaticsNode.GetClassConstructorContextSize(factory.Target)); encoder.EmitLDR(encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg3, (short)factory.Target.PointerSize); encoder.EmitCMP(encoder.TargetRegister.Arg2, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg3); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg0); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitLDR(encoder.TargetRegister.Result, encoder.TargetRegister.Result); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. 
It is stored at the beginning of the non-GC statics region. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); encoder.EmitSUB(encoder.TargetRegister.Arg2, NonGCStaticsNode.GetClassConstructorContextSize(factory.Target)); encoder.EmitLDR(encoder.TargetRegister.Arg3, encoder.TargetRegister.Arg2, (short)factory.Target.PointerSize); encoder.EmitCMP(encoder.TargetRegister.Arg3, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitMOV(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); encoder.EmitSUB(encoder.TargetRegister.Arg2, cctorContextSize); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). encoder.EmitLDR(encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1); // Second arg: index of the type in the ThreadStatic section of the modules encoder.EmitLDR(encoder.TargetRegister.Arg1, encoder.TargetRegister.Arg1, factory.Target.PointerSize); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. 
Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitMOV(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ConstrainedDirectCall: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. } } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref ARM64Emitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable encoder.EmitLDR(contextRegister, contextRegister, slotOffset); } } }
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
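As a rough conceptual model (an assumption for illustration, not runtime code), the "simple" helper stubs emitted above amount to indexing the instantiation's generic dictionary and optionally chasing one indirection:

// Rough C# model of the emitted stub for the simple lookup cases.
static unsafe class LookupModel
{
    public static void* GenericLookup(void** dictionary, int slot, bool indirect)
    {
        void* cell = dictionary[slot];  // mirrors: EmitLDR(result, context, slot * PointerSize)
        if (indirect)
            cell = *(void**)cell;       // mirrors: GenericLookupResultReferenceType.Indirect
        return cell;                    // the stub then returns the cell (EmitRET)
    }
}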
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunGenericHelperNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.X64; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ X64Emitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref X64Emitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell AddrMode loadEntry = new AddrMode( context, null, dictionarySlot * factory.Target.PointerSize, 0, AddrModeSize.Int64); encoder.EmitMOV(result, ref loadEntry); // If there's any invalid entries, we need to test for them // // Only do this in relocsOnly to make it easier to weed out bugs - the _hasInvalidEntries // flag can change over the course of compilation and the bad slot helper dependency // should be reported by someone else - the system should not rely on it coming from here. if (!relocsOnly && _hasInvalidEntries) { encoder.EmitCompareToZero(result); encoder.EmitJE(GetBadSlotHelper(factory)); } switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection loadEntry = new AddrMode(result, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(result, ref loadEntry); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } else { EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, _lookupSignature, relocsOnly); encoder.EmitMOV(encoder.TargetRegister.Result, encoder.TargetRegister.Arg0); // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. 
int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, factory.Target.PointerSize - cctorContextSize, 0, AddrModeSize.Int32); encoder.EmitCMP(ref initialized, 1); encoder.EmitRETIfEqual(); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); AddrMode loadFromResult = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromResult); if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, factory.Target.PointerSize - cctorContextSize, 0, AddrModeSize.Int32); encoder.EmitCMP(ref initialized, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. 
GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg2, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg2, ref loadCctor); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). AddrMode loadFromArg1 = new AddrMode(encoder.TargetRegister.Arg1, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromArg1); // Second arg: index of the type in the ThreadStatic section of the modules AddrMode loadFromArg1AndDelta = new AddrMode(encoder.TargetRegister.Arg1, null, factory.Target.PointerSize, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromArg1AndDelta); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitLEAQ(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. } } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. 
vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable AddrMode loadDictionary = new AddrMode(contextRegister, null, slotOffset, 0, AddrModeSize.Int64); encoder.EmitMOV(contextRegister, ref loadDictionary); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using ILCompiler.DependencyAnalysis.X64; using Internal.TypeSystem; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { partial class ReadyToRunGenericHelperNode { protected Register GetContextRegister(ref /* readonly */ X64Emitter encoder) { if (_id == ReadyToRunHelperId.DelegateCtor) return encoder.TargetRegister.Arg2; else return encoder.TargetRegister.Arg0; } protected void EmitDictionaryLookup(NodeFactory factory, ref X64Emitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly) { // INVARIANT: must not trash context register // Find the generic dictionary slot int dictionarySlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. dictionarySlot = factory.GenericDictionaryLayout(_dictionaryOwner).GetSlotForEntry(lookup); } // Load the generic dictionary cell AddrMode loadEntry = new AddrMode( context, null, dictionarySlot * factory.Target.PointerSize, 0, AddrModeSize.Int64); encoder.EmitMOV(result, ref loadEntry); // If there's any invalid entries, we need to test for them // // Only do this in relocsOnly to make it easier to weed out bugs - the _hasInvalidEntries // flag can change over the course of compilation and the bad slot helper dependency // should be reported by someone else - the system should not rely on it coming from here. if (!relocsOnly && _hasInvalidEntries) { encoder.EmitCompareToZero(result); encoder.EmitJE(GetBadSlotHelper(factory)); } switch (lookup.LookupResultReferenceType(factory)) { case GenericLookupResultReferenceType.Indirect: // Do another indirection loadEntry = new AddrMode(result, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(result, ref loadEntry); break; case GenericLookupResultReferenceType.ConditionalIndirect: // Test result, 0x1 // JEQ L1 // mov result, [result-1] // L1: throw new NotImplementedException(); default: break; } } protected sealed override void EmitCode(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // First load the generic context into the context register. EmitLoadGenericContext(factory, ref encoder, relocsOnly); Register contextRegister = GetContextRegister(ref encoder); switch (_id) { case ReadyToRunHelperId.GetNonGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } else { EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, _lookupSignature, relocsOnly); encoder.EmitMOV(encoder.TargetRegister.Result, encoder.TargetRegister.Arg0); // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. 
int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, factory.Target.PointerSize - cctorContextSize, 0, AddrModeSize.Int32); encoder.EmitCMP(ref initialized, 1); encoder.EmitRETIfEqual(); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase)); } } break; case ReadyToRunHelperId.GetGCStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); AddrMode loadFromResult = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromResult); if (!factory.PreinitializationManager.HasLazyStaticConstructor(target)) { encoder.EmitRET(); } else { // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region. GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, factory.Target.PointerSize - cctorContextSize, 0, AddrModeSize.Int32); encoder.EmitCMP(ref initialized, 1); encoder.EmitRETIfEqual(); encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor); encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase)); } } break; case ReadyToRunHelperId.GetThreadStaticBase: { Debug.Assert(contextRegister == encoder.TargetRegister.Arg0); MetadataType target = (MetadataType)_target; // Look up the index cell EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); ISymbolNode helperEntrypoint; if (factory.PreinitializationManager.HasLazyStaticConstructor(target)) { // There is a lazy class constructor. We need the non-GC static base because that's where the // class constructor context lives. 
GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target); AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg2, null, -cctorContextSize, 0, AddrModeSize.Int64); encoder.EmitLEA(encoder.TargetRegister.Arg2, ref loadCctor); helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); } else { helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); } // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). AddrMode loadFromArg1 = new AddrMode(encoder.TargetRegister.Arg1, null, 0, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromArg1); // Second arg: index of the type in the ThreadStatic section of the modules AddrMode loadFromArg1AndDelta = new AddrMode(encoder.TargetRegister.Arg1, null, factory.Target.PointerSize, 0, AddrModeSize.Int64); encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromArg1AndDelta); encoder.EmitJMP(helperEntrypoint); } break; case ReadyToRunHelperId.DelegateCtor: { // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor // method expects. Codegen also passed us the generic context in Arg2. // We now need to load the delegate target method into Arg2 (using a dictionary lookup) // and the optional 4th parameter, and call the ctor. Debug.Assert(contextRegister == encoder.TargetRegister.Arg2); var target = (DelegateCreationInfo)_target; EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg2, encoder.TargetRegister.Arg2, _lookupSignature, relocsOnly); if (target.Thunk != null) { Debug.Assert(target.Constructor.Method.Signature.Length == 3); encoder.EmitLEAQ(encoder.TargetRegister.Arg3, target.Thunk); } else { Debug.Assert(target.Constructor.Method.Signature.Length == 2); } encoder.EmitJMP(target.Constructor); } break; // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodHandle: case ReadyToRunHelperId.FieldHandle: case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.ObjectAllocator: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ConstrainedDirectCall: { EmitDictionaryLookup(factory, ref encoder, contextRegister, encoder.TargetRegister.Result, _lookupSignature, relocsOnly); encoder.EmitRET(); } break; default: throw new NotImplementedException(); } } protected virtual void EmitLoadGenericContext(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // Assume generic context is already loaded in the context register. 
} } partial class ReadyToRunGenericLookupFromTypeNode { protected override void EmitLoadGenericContext(NodeFactory factory, ref X64Emitter encoder, bool relocsOnly) { // We start with context register pointing to the MethodTable Register contextRegister = GetContextRegister(ref encoder); // Locate the VTable slot that points to the dictionary int vtableSlot = 0; if (!relocsOnly) { // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly. vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner); } int pointerSize = factory.Target.PointerSize; int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize); // Load the dictionary pointer from the VTable AddrMode loadDictionary = new AddrMode(contextRegister, null, slotOffset, 0, AddrModeSize.Int64); encoder.EmitMOV(contextRegister, ref loadDictionary); } } }
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
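The PR description lists delegate creation over static virtuals as a remaining gap. A hedged illustration of the supported versus unsupported shapes as of this change, reusing the hypothetical IAdditive interface from the earlier sketch:

static class DelegateGap
{
    // Per the PR notes, a delegate over a static virtual needs further
    // RyuJIT/JitInterface work, so the direct method-group form is the
    // unsupported shape in shared generic code:
    //     Func<T, T, T> f = T.Add;   // not yet supported at this point
    // Wrapping the call in a lambda routes through the constrained-call
    // path this PR implements, so this shape works:
    public static Func<T, T, T> MakeAdder<T>() where T : IAdditive<T>
        => (l, r) => T.Add(l, r);
}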
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/VTableSliceNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { /// <summary> /// Represents the VTable for a type's slice. For example, System.String's VTableSliceNode includes virtual /// slots added by System.String itself, System.Object's VTableSliceNode contains the virtuals it defines. /// </summary> public abstract class VTableSliceNode : DependencyNodeCore<NodeFactory> { protected TypeDesc _type; public VTableSliceNode(TypeDesc type) { Debug.Assert(!type.IsArray, "Wanted to call GetClosestDefType?"); _type = type; } public abstract IReadOnlyList<MethodDesc> Slots { get; } public TypeDesc Type => _type; /// <summary> /// Gets a value indicating whether the slots are assigned at the beginning of the compilation. /// </summary> public abstract bool HasFixedSlots { get; } protected override string GetName(NodeFactory factory) => $"__vtable_{factory.NameMangler.GetMangledTypeName(_type).ToString()}"; public override bool StaticDependenciesAreComputed => true; public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory factory) { if (_type.HasBaseType) { return new[] { new DependencyListEntry(factory.VTable(_type.BaseType), "Base type VTable") }; } return null; } public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) => null; public override IEnumerable<CombinedDependencyListEntry> SearchDynamicDependencies(List<DependencyNodeCore<NodeFactory>> markedNodes, int firstNode, NodeFactory factory) => null; public override bool InterestingForDynamicDependencyAnalysis => false; public override bool HasDynamicDependencies => false; public override bool HasConditionalStaticDependencies => false; } /// <summary> /// Represents a VTable slice with fixed slots whose assignment was determined at the time the slice was allocated. /// </summary> internal class PrecomputedVTableSliceNode : VTableSliceNode { private readonly IReadOnlyList<MethodDesc> _slots; public PrecomputedVTableSliceNode(TypeDesc type, IReadOnlyList<MethodDesc> slots) : base(type) { _slots = slots; } public override IReadOnlyList<MethodDesc> Slots { get { return _slots; } } public override bool HasFixedSlots { get { return true; } } } /// <summary> /// Represents a VTable slice for a complete type - a type with all virtual method slots generated, /// irrespective of whether they are used. /// </summary> internal sealed class EagerlyBuiltVTableSliceNode : PrecomputedVTableSliceNode { public EagerlyBuiltVTableSliceNode(TypeDesc type) : base(type, ComputeSlots(type)) { } private static IReadOnlyList<MethodDesc> ComputeSlots(TypeDesc type) { var slots = new ArrayBuilder<MethodDesc>(); bool isObjectType = type.IsObject; DefType defType = type.GetClosestDefType(); IEnumerable<MethodDesc> allSlots = type.IsInterface ? type.GetAllVirtualMethods() : defType.EnumAllVirtualSlots(); foreach (var method in allSlots) { // Static virtual methods don't go in vtables if (method.Signature.IsStatic) continue; // GVMs are not emitted in the type's vtable. if (method.HasInstantiation) continue; // Finalizers are called via a field on the MethodTable, not through the VTable if (isObjectType && method.Name == "Finalize") continue; // Current type doesn't define this slot. 
if (method.OwningType != defType) continue; slots.Add(method); } return slots.ToArray(); } } /// <summary> /// Represents a VTable slice where slots are built on demand. Only the slots that are actually used /// will be generated. /// </summary> internal sealed class LazilyBuiltVTableSliceNode : VTableSliceNode { private HashSet<MethodDesc> _usedMethods = new HashSet<MethodDesc>(); private MethodDesc[] _slots; public LazilyBuiltVTableSliceNode(TypeDesc type) : base(type) { } public override IReadOnlyList<MethodDesc> Slots { get { if (_slots == null) { // Sort the lazily populated slots in metadata order (the order in which they show up // in GetAllMethods()). // This ensures that Foo<string> and Foo<object> will end up with the same vtable // no matter the order in which VirtualMethodUse nodes populated it. ArrayBuilder<MethodDesc> slotsBuilder = new ArrayBuilder<MethodDesc>(); DefType defType = _type.GetClosestDefType(); foreach (var method in defType.GetAllMethods()) { if (_usedMethods.Contains(method)) slotsBuilder.Add(method); } Debug.Assert(_usedMethods.Count == slotsBuilder.Count); _slots = slotsBuilder.ToArray(); // Null out used methods so that we AV if someone tries to add now. _usedMethods = null; } return _slots; } } public override bool HasFixedSlots { get { return false; } } public void AddEntry(NodeFactory factory, MethodDesc virtualMethod) { // GVMs are not emitted in the type's vtable. Debug.Assert(!virtualMethod.HasInstantiation); Debug.Assert(virtualMethod.IsVirtual); Debug.Assert(_slots == null && _usedMethods != null); Debug.Assert(virtualMethod.OwningType == _type); // Finalizers are called via a field on the MethodTable, not through the VTable if (_type.IsObject && virtualMethod.Name == "Finalize") return; _usedMethods.Add(virtualMethod); } public override bool HasConditionalStaticDependencies { get { return _type.ConvertToCanonForm(CanonicalFormKind.Specific) != _type; } } public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { // VirtualMethodUse of Foo<SomeType>.Method will bring in VirtualMethodUse // of Foo<__Canon>.Method. This in turn should bring in Foo<OtherType>.Method. DefType defType = _type.GetClosestDefType(); IEnumerable<MethodDesc> allSlots = _type.IsInterface ? _type.GetAllVirtualMethods() : defType.EnumAllVirtualSlots(); foreach (var method in allSlots) { // Generic virtual methods are tracked by an orthogonal mechanism. if (method.HasInstantiation) continue; // Current type doesn't define this slot. Another VTableSlice will take care of this. if (method.OwningType != defType) continue; if (defType.Context.SupportsCanon) yield return new CombinedDependencyListEntry( factory.VirtualMethodUse(method), factory.VirtualMethodUse(method.GetCanonMethodTarget(CanonicalFormKind.Specific)), "Canonically equivalent virtual method use"); if (defType.Context.SupportsUniversalCanon) yield return new CombinedDependencyListEntry( factory.VirtualMethodUse(method), factory.VirtualMethodUse(method.GetCanonMethodTarget(CanonicalFormKind.Universal)), "Universal Canonically equivalent virtual method use"); } } } }
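A note on the lazy slice logic visible in the snapshot above, before the post-change snapshot: `LazilyBuiltVTableSliceNode.Slots` replays `GetAllMethods()` in metadata order and keeps only the used entries, so slot numbering is independent of the order in which `VirtualMethodUse` nodes arrived. The sketch below is illustrative only; the names (`BuildSlots`, `Frobnicate`) are simplified stand-ins, not compiler APIs.

using System;
using System.Collections.Generic;

static class SlotOrderingSketch
{
    // Emit slots in declaration (metadata) order, ignoring discovery order,
    // so Foo<string> and Foo<object> always agree on slot numbering.
    static string[] BuildSlots(IReadOnlyList<string> methodsInMetadataOrder,
                               HashSet<string> usedMethods)
    {
        var slots = new List<string>();
        foreach (string method in methodsInMetadataOrder)
            if (usedMethods.Contains(method))
                slots.Add(method);
        return slots.ToArray();
    }

    static void Main()
    {
        var metadataOrder = new[] { "ToString", "GetHashCode", "Frobnicate" };
        // Two different discovery orders yield the same layout:
        Console.WriteLine(string.Join(",",
            BuildSlots(metadataOrder, new HashSet<string> { "Frobnicate", "ToString" })));
        Console.WriteLine(string.Join(",",
            BuildSlots(metadataOrder, new HashSet<string> { "ToString", "Frobnicate" })));
    }
}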
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Internal.TypeSystem; using ILCompiler.DependencyAnalysisFramework; using Debug = System.Diagnostics.Debug; namespace ILCompiler.DependencyAnalysis { /// <summary> /// Represents the VTable for a type's slice. For example, System.String's VTableSliceNode includes virtual /// slots added by System.String itself, System.Object's VTableSliceNode contains the virtuals it defines. /// </summary> public abstract class VTableSliceNode : DependencyNodeCore<NodeFactory> { protected TypeDesc _type; public VTableSliceNode(TypeDesc type) { Debug.Assert(!type.IsArray, "Wanted to call GetClosestDefType?"); _type = type; } public abstract IReadOnlyList<MethodDesc> Slots { get; } public TypeDesc Type => _type; /// <summary> /// Gets a value indicating whether the slots are assigned at the beginning of the compilation. /// </summary> public abstract bool HasFixedSlots { get; } protected override string GetName(NodeFactory factory) => $"__vtable_{factory.NameMangler.GetMangledTypeName(_type).ToString()}"; public override bool StaticDependenciesAreComputed => true; public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory factory) { if (_type.HasBaseType) { return new[] { new DependencyListEntry(factory.VTable(_type.BaseType), "Base type VTable") }; } return null; } public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) => null; public override IEnumerable<CombinedDependencyListEntry> SearchDynamicDependencies(List<DependencyNodeCore<NodeFactory>> markedNodes, int firstNode, NodeFactory factory) => null; public override bool InterestingForDynamicDependencyAnalysis => false; public override bool HasDynamicDependencies => false; public override bool HasConditionalStaticDependencies => false; } /// <summary> /// Represents a VTable slice with fixed slots whose assignment was determined at the time the slice was allocated. /// </summary> internal class PrecomputedVTableSliceNode : VTableSliceNode { private readonly IReadOnlyList<MethodDesc> _slots; public PrecomputedVTableSliceNode(TypeDesc type, IReadOnlyList<MethodDesc> slots) : base(type) { _slots = slots; } public override IReadOnlyList<MethodDesc> Slots { get { return _slots; } } public override bool HasFixedSlots { get { return true; } } } /// <summary> /// Represents a VTable slice for a complete type - a type with all virtual method slots generated, /// irrespective of whether they are used. /// </summary> internal sealed class EagerlyBuiltVTableSliceNode : PrecomputedVTableSliceNode { public EagerlyBuiltVTableSliceNode(TypeDesc type) : base(type, ComputeSlots(type)) { } private static IReadOnlyList<MethodDesc> ComputeSlots(TypeDesc type) { var slots = new ArrayBuilder<MethodDesc>(); bool isObjectType = type.IsObject; DefType defType = type.GetClosestDefType(); IEnumerable<MethodDesc> allSlots = type.IsInterface ? type.GetAllVirtualMethods() : defType.EnumAllVirtualSlots(); foreach (var method in allSlots) { // Static virtual methods don't go in vtables if (method.Signature.IsStatic) continue; // GVMs are not emitted in the type's vtable. if (method.HasInstantiation) continue; // Finalizers are called via a field on the MethodTable, not through the VTable if (isObjectType && method.Name == "Finalize") continue; // Current type doesn't define this slot. 
if (method.OwningType != defType) continue; slots.Add(method); } return slots.ToArray(); } } /// <summary> /// Represents a VTable slice where slots are built on demand. Only the slots that are actually used /// will be generated. /// </summary> internal sealed class LazilyBuiltVTableSliceNode : VTableSliceNode { private HashSet<MethodDesc> _usedMethods = new HashSet<MethodDesc>(); private MethodDesc[] _slots; public LazilyBuiltVTableSliceNode(TypeDesc type) : base(type) { } public override IReadOnlyList<MethodDesc> Slots { get { if (_slots == null) { // Sort the lazily populated slots in metadata order (the order in which they show up // in GetAllMethods()). // This ensures that Foo<string> and Foo<object> will end up with the same vtable // no matter the order in which VirtualMethodUse nodes populated it. ArrayBuilder<MethodDesc> slotsBuilder = new ArrayBuilder<MethodDesc>(); DefType defType = _type.GetClosestDefType(); foreach (var method in defType.GetAllMethods()) { if (_usedMethods.Contains(method)) slotsBuilder.Add(method); } Debug.Assert(_usedMethods.Count == slotsBuilder.Count); _slots = slotsBuilder.ToArray(); // Null out used methods so that we AV if someone tries to add now. _usedMethods = null; } return _slots; } } public override bool HasFixedSlots { get { return false; } } public void AddEntry(NodeFactory factory, MethodDesc virtualMethod) { // GVMs are not emitted in the type's vtable. Debug.Assert(!virtualMethod.HasInstantiation); Debug.Assert(virtualMethod.IsVirtual); Debug.Assert(_slots == null && _usedMethods != null); Debug.Assert(virtualMethod.OwningType == _type); // Finalizers are called via a field on the MethodTable, not through the VTable if (_type.IsObject && virtualMethod.Name == "Finalize") return; _usedMethods.Add(virtualMethod); } public override bool HasConditionalStaticDependencies { get { return _type.ConvertToCanonForm(CanonicalFormKind.Specific) != _type; } } public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) { // VirtualMethodUse of Foo<SomeType>.Method will bring in VirtualMethodUse // of Foo<__Canon>.Method. This in turn should bring in Foo<OtherType>.Method. DefType defType = _type.GetClosestDefType(); IEnumerable<MethodDesc> allSlots = _type.IsInterface ? _type.GetAllVirtualMethods() : defType.EnumAllVirtualSlots(); foreach (var method in allSlots) { // Generic virtual methods are tracked by an orthogonal mechanism. if (method.HasInstantiation) continue; // Static interface methods don't go into vtables if (method.Signature.IsStatic) continue; // Current type doesn't define this slot. Another VTableSlice will take care of this. if (method.OwningType != defType) continue; if (defType.Context.SupportsCanon) yield return new CombinedDependencyListEntry( factory.VirtualMethodUse(method), factory.VirtualMethodUse(method.GetCanonMethodTarget(CanonicalFormKind.Specific)), "Canonically equivalent virtual method use"); if (defType.Context.SupportsUniversalCanon) yield return new CombinedDependencyListEntry( factory.VirtualMethodUse(method), factory.VirtualMethodUse(method.GetCanonMethodTarget(CanonicalFormKind.Universal)), "Universal Canonically equivalent virtual method use"); } } } }
1
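The visible change in this record's after-snapshot is the added static-method filter in `GetConditionalStaticDependencies` ("Static interface methods don't go into vtables"), matching the filter already present in `ComputeSlots`. A minimal, illustrative C# sketch of why such methods never need a slot (the type and member names here are made up for the example):

using System;

interface IStaticParsable<TSelf> where TSelf : IStaticParsable<TSelf>
{
    static abstract TSelf Parse(string s);   // dispatched via the constraint, never via a vtable slot
}

struct Celsius : IStaticParsable<Celsius>
{
    public double Value;
    public static Celsius Parse(string s) => new Celsius { Value = double.Parse(s) };
}

static class ConstrainedCallDemo
{
    // T.Parse compiles to a "constrained." call resolved at JIT/AOT time,
    // so no virtual slot on any instance is ever consulted.
    static T ParseAny<T>(string s) where T : IStaticParsable<T> => T.Parse(s);

    static void Main() => Console.WriteLine(ParseAny<Celsius>("21.5").Value);
}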
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
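As the description above notes, delegate creation over static virtual methods is the main unsupported scenario. A hypothetical repro sketch of that scenario (all names here - `INumberLike`, `Zero`, `CaptureZero` - are invented for illustration and do not appear in the PR):

using System;

interface INumberLike<TSelf> where TSelf : INumberLike<TSelf>
{
    static abstract TSelf Zero();
}

struct Meters : INumberLike<Meters>
{
    public static Meters Zero() => default;
    public override string ToString() => "0m";
}

static class DelegateDemo
{
    // The method-group conversion below takes a function pointer to a
    // constrained static virtual (ldftn after constraint resolution) -
    // exactly the shape the description says still needs RyuJIT/JitInterface work.
    static Func<T> CaptureZero<T>() where T : INumberLike<T> => T.Zero;

    static void Main() => Console.WriteLine(CaptureZero<Meters>()());
}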
./src/coreclr/tools/aot/ILCompiler.Compiler/IL/ILImporter.Scanner.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.ReadyToRunConstants; using ILCompiler; using ILCompiler.DependencyAnalysis; using Debug = System.Diagnostics.Debug; using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList; namespace Internal.IL { // Implements an IL scanner that scans method bodies to be compiled by the code generation // backend before the actual compilation happens to gain insights into the code. partial class ILImporter { private readonly MethodIL _methodIL; private readonly MethodIL _canonMethodIL; private readonly ILScanner _compilation; private readonly ILScanNodeFactory _factory; // True if we're scanning a throwing method body because scanning the real body failed. private readonly bool _isFallbackBodyCompilation; private readonly MethodDesc _canonMethod; private DependencyList _dependencies = new DependencyList(); private readonly byte[] _ilBytes; private class BasicBlock { // Common fields public enum ImportState : byte { Unmarked, IsPending } public BasicBlock Next; public int StartOffset; public ImportState State = ImportState.Unmarked; public bool TryStart; public bool FilterStart; public bool HandlerStart; } private bool _isReadOnly; private TypeDesc _constrained; private int _currentInstructionOffset; private int _previousInstructionOffset; private class ExceptionRegion { public ILExceptionRegion ILRegion; } private ExceptionRegion[] _exceptionRegions; public ILImporter(ILScanner compilation, MethodDesc method, MethodIL methodIL = null) { if (methodIL == null) { methodIL = compilation.GetMethodIL(method); } else { _isFallbackBodyCompilation = true; } // This is e.g. an "extern" method in C# without a DllImport or InternalCall. if (methodIL == null) { ThrowHelper.ThrowInvalidProgramException(ExceptionStringID.InvalidProgramSpecific, method); } _compilation = compilation; _factory = (ILScanNodeFactory)compilation.NodeFactory; _ilBytes = methodIL.GetILBytes(); _canonMethodIL = methodIL; // Get the runtime determined method IL so that this works right in shared code // and tokens in shared code resolve to runtime determined types. MethodIL uninstantiatiedMethodIL = methodIL.GetMethodILDefinition(); if (methodIL != uninstantiatiedMethodIL) { MethodDesc sharedMethod = method.GetSharedRuntimeFormMethodTarget(); _methodIL = new InstantiatedMethodIL(sharedMethod, uninstantiatiedMethodIL); } else { _methodIL = methodIL; } _canonMethod = method; var ilExceptionRegions = methodIL.GetExceptionRegions(); _exceptionRegions = new ExceptionRegion[ilExceptionRegions.Length]; for (int i = 0; i < ilExceptionRegions.Length; i++) { _exceptionRegions[i] = new ExceptionRegion() { ILRegion = ilExceptionRegions[i] }; } } public DependencyList Import() { TypeDesc owningType = _canonMethod.OwningType; if (_compilation.HasLazyStaticConstructor(owningType)) { // Don't trigger cctor if this is a fallback compilation (bad cctor could have been the reason for fallback). // Otherwise follow the rules from ECMA-335 I.8.9.5. if (!_isFallbackBodyCompilation && (_canonMethod.Signature.IsStatic || _canonMethod.IsConstructor || owningType.IsValueType)) { // For beforefieldinit, we can wait for field access. 
if (!((MetadataType)owningType).IsBeforeFieldInit) { MethodDesc method = _methodIL.OwningMethod; if (method.OwningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.GetNonGCStaticBase, method.OwningType), "Owning type cctor"); } else { _dependencies.Add(_factory.ReadyToRunHelper(ReadyToRunHelperId.GetNonGCStaticBase, method.OwningType), "Owning type cctor"); } } } } if (_canonMethod.IsSynchronized) { const string reason = "Synchronized method"; if (_canonMethod.Signature.IsStatic) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorEnterStatic), reason); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorExitStatic), reason); } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorEnter), reason); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorExit), reason); } } FindBasicBlocks(); ImportBasicBlocks(); CodeBasedDependencyAlgorithm.AddDependenciesDueToMethodCodePresence(ref _dependencies, _factory, _canonMethod, _canonMethodIL); return _dependencies; } private ISymbolNode GetGenericLookupHelper(ReadyToRunHelperId helperId, object helperArgument) { GenericDictionaryLookup lookup = _compilation.ComputeGenericLookup(_canonMethod, helperId, helperArgument); Debug.Assert(lookup.UseHelper); if (_canonMethod.RequiresInstMethodDescArg()) { return _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup(lookup.HelperId, lookup.HelperObject, _canonMethod); } else { Debug.Assert(_canonMethod.RequiresInstArg() || _canonMethod.AcquiresInstMethodTableFromThis()); return _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup(lookup.HelperId, lookup.HelperObject, _canonMethod.OwningType); } } private ISymbolNode GetHelperEntrypoint(ReadyToRunHelper helper) { return _compilation.GetHelperEntrypoint(helper); } private void MarkInstructionBoundary() { } private void EndImportingBasicBlock(BasicBlock basicBlock) { } private void StartImportingBasicBlock(BasicBlock basicBlock) { // Import all associated EH regions foreach (ExceptionRegion ehRegion in _exceptionRegions) { ILExceptionRegion region = ehRegion.ILRegion; if (region.TryOffset == basicBlock.StartOffset) { MarkBasicBlock(_basicBlocks[region.HandlerOffset]); if (region.Kind == ILExceptionRegionKind.Filter) MarkBasicBlock(_basicBlocks[region.FilterOffset]); // Once https://github.com/dotnet/corert/issues/3460 is done, this should be deleted. // Throwing InvalidProgram is not great, but we want to do *something* if this happens // because doing nothing means problems at runtime. This is not worth piping a // a new exception with a fancy message for. if (region.Kind == ILExceptionRegionKind.Catch) { TypeDesc catchType = (TypeDesc)_methodIL.GetObject(region.ClassToken); if (catchType.IsRuntimeDeterminedSubtype) ThrowHelper.ThrowInvalidProgramException(); } } } _currentInstructionOffset = -1; _previousInstructionOffset = -1; } private void StartImportingInstruction() { _previousInstructionOffset = _currentInstructionOffset; _currentInstructionOffset = _currentOffset; } private void EndImportingInstruction() { // The instruction should have consumed any prefixes. _constrained = null; _isReadOnly = false; } private void ImportJmp(int token) { // JMP is kind of like a tail call (with no arguments pushed on the stack). 
ImportCall(ILOpcode.call, token); } private void ImportCasting(ILOpcode opcode, int token) { TypeDesc type = (TypeDesc)_methodIL.GetObject(token); if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandleForCasting, type), "IsInst/CastClass"); } else { _dependencies.Add(_compilation.ComputeConstantLookup(ReadyToRunHelperId.TypeHandleForCasting, type), "IsInst/CastClass"); } } private IMethodNode GetMethodEntrypoint(MethodDesc method) { if (method.HasInstantiation || method.OwningType.HasInstantiation) { _compilation.DetectGenericCycles(_canonMethod, method); } return _factory.MethodEntrypoint(method); } private void ImportCall(ILOpcode opcode, int token) { // We get both the canonical and runtime determined form - JitInterface mostly operates // on the canonical form. var runtimeDeterminedMethod = (MethodDesc)_methodIL.GetObject(token); var method = (MethodDesc)_canonMethodIL.GetObject(token); _compilation.NodeFactory.MetadataManager.GetDependenciesDueToAccess(ref _dependencies, _compilation.NodeFactory, _canonMethodIL, method); if (method.IsRawPInvoke()) { // Raw P/invokes don't have any dependencies. return; } string reason = null; switch (opcode) { case ILOpcode.newobj: reason = "newobj"; break; case ILOpcode.call: reason = "call"; break; case ILOpcode.callvirt: reason = "callvirt"; break; case ILOpcode.ldftn: reason = "ldftn"; break; case ILOpcode.ldvirtftn: reason = "ldvirtftn"; break; default: Debug.Assert(false); break; } if (opcode == ILOpcode.newobj) { TypeDesc owningType = runtimeDeterminedMethod.OwningType; if (owningType.IsString) { // String .ctor handled specially below } else if (owningType.IsGCPointer) { if (owningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, owningType), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(owningType), reason); } if (owningType.IsArray) { // RyuJIT is going to call the "MdArray" creation helper even if this is an SzArray, // hence the IsArray check above. Note that the MdArray helper can handle SzArrays. _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewMultiDimArr), reason); return; } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewObject), reason); } } if (owningType.IsDelegate) { // If this is a verifiable delegate construction sequence, the previous instruction is a ldftn/ldvirtftn if (_previousInstructionOffset >= 0 && _ilBytes[_previousInstructionOffset] == (byte)ILOpcode.prefix1) { // TODO: for ldvirtftn we need to also check for the `dup` instruction, otherwise this is a normal newobj. 
ILOpcode previousOpcode = (ILOpcode)(0x100 + _ilBytes[_previousInstructionOffset + 1]); if (previousOpcode == ILOpcode.ldvirtftn || previousOpcode == ILOpcode.ldftn) { int delTargetToken = ReadILTokenAt(_previousInstructionOffset + 2); var delTargetMethod = (MethodDesc)_methodIL.GetObject(delTargetToken); TypeDesc canonDelegateType = method.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); DelegateCreationInfo info = _compilation.GetDelegateCtor(canonDelegateType, delTargetMethod, previousOpcode == ILOpcode.ldvirtftn); if (info.NeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.DelegateCtor, info), reason); } else { _dependencies.Add(_factory.ReadyToRunHelper(ReadyToRunHelperId.DelegateCtor, info), reason); } return; } } } } if (method.OwningType.IsDelegate && method.Name == "Invoke" && opcode != ILOpcode.ldftn && opcode != ILOpcode.ldvirtftn) { // This call is expanded as an intrinsic; it's not an actual function call. // Before codegen realizes this is an intrinsic, it might still ask questions about // the vtable of this virtual method, so let's make sure it's marked in the scanner's // dependency graph. _dependencies.Add(_factory.VTable(method.OwningType), reason); return; } if (method.IsIntrinsic) { if (IsRuntimeHelpersInitializeArrayOrCreateSpan(method)) { if (_previousInstructionOffset >= 0 && _ilBytes[_previousInstructionOffset] == (byte)ILOpcode.ldtoken) return; } if (IsActivatorDefaultConstructorOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.DefaultConstructor, runtimeDeterminedMethod.Instantiation[0]), reason); } else { MethodDesc ctor = Compilation.GetConstructorForCreateInstanceIntrinsic(method.Instantiation[0]); _dependencies.Add(_factory.CanonicalEntrypoint(ctor), reason); } return; } if (IsActivatorAllocatorOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.ObjectAllocator, runtimeDeterminedMethod.Instantiation[0]), reason); } else { _dependencies.Add(_compilation.ComputeConstantLookup(ReadyToRunHelperId.ObjectAllocator, method.Instantiation[0]), reason); } return; } if (method.OwningType.IsByReferenceOfT && (method.IsConstructor || method.Name == "get_Value")) { return; } if (IsEETypePtrOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.Instantiation[0]), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(method.Instantiation[0]), reason); } return; } } TypeDesc exactType = method.OwningType; bool resolvedConstraint = false; bool forceUseRuntimeLookup = false; MethodDesc methodAfterConstraintResolution = method; if (_constrained != null) { // We have a "constrained." call. Try a partial resolve of the constraint call. Note that this // will not necessarily resolve the call exactly, since we might be compiling // shared generic code - it may just resolve it to a candidate suitable for // JIT compilation, and require a runtime lookup for the actual code pointer // to call. 
TypeDesc constrained = _constrained; if (constrained.IsRuntimeDeterminedSubtype) constrained = constrained.ConvertToCanonForm(CanonicalFormKind.Specific); MethodDesc directMethod = constrained.GetClosestDefType().TryResolveConstraintMethodApprox(method.OwningType, method, out forceUseRuntimeLookup); if (directMethod == null && constrained.IsEnum) { // Constrained calls to methods on enum methods resolve to System.Enum's methods. System.Enum is a reference // type though, so we would fail to resolve and box. We have a special path for those to avoid boxing. directMethod = _compilation.TypeSystemContext.TryResolveConstrainedEnumMethod(constrained, method); } if (directMethod != null) { // Either // 1. no constraint resolution at compile time (!directMethod) // OR 2. no code sharing lookup in call // OR 3. we have have resolved to an instantiating stub methodAfterConstraintResolution = directMethod; Debug.Assert(!methodAfterConstraintResolution.OwningType.IsInterface); resolvedConstraint = true; exactType = constrained; } else if (constrained.IsValueType) { // We'll need to box `this`. Note we use _constrained here, because the other one is canonical. AddBoxingDependencies(_constrained, reason); } } MethodDesc targetMethod = methodAfterConstraintResolution; bool exactContextNeedsRuntimeLookup; if (targetMethod.HasInstantiation) { exactContextNeedsRuntimeLookup = targetMethod.IsSharedByGenericInstantiations; } else { exactContextNeedsRuntimeLookup = exactType.IsCanonicalSubtype(CanonicalFormKind.Any); } // // Determine whether to perform direct call // bool directCall = false; if (targetMethod.Signature.IsStatic) { // Static methods are always direct calls directCall = true; } else if ((opcode != ILOpcode.callvirt && opcode != ILOpcode.ldvirtftn) || resolvedConstraint) { directCall = true; } else { if (!targetMethod.IsVirtual || // Final/sealed has no meaning for interfaces, but lets us devirtualize otherwise (!targetMethod.OwningType.IsInterface && (targetMethod.IsFinal || targetMethod.OwningType.IsSealed()))) { directCall = true; } } if (directCall && targetMethod.IsAbstract) { ThrowHelper.ThrowBadImageFormatException(); } bool allowInstParam = opcode != ILOpcode.ldvirtftn && opcode != ILOpcode.ldftn; if (directCall && !allowInstParam && targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific).RequiresInstArg()) { // Needs a single address to call this method but the method needs a hidden argument. // We need a fat function pointer for this that captures both things. if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodEntry, runtimeDeterminedMethod), reason); } else { _dependencies.Add(_factory.FatFunctionPointer(runtimeDeterminedMethod), reason); } } else if (directCall && resolvedConstraint && exactContextNeedsRuntimeLookup) { // We want to do a direct call to a shared method on a valuetype. We need to provide // a generic context, but the JitInterface doesn't provide a way for us to do it from here. // So we do the next best thing and ask RyuJIT to look up a fat pointer. // // We have the canonical version of the method - find the runtime determined version. // This is simplified because we know the method is on a valuetype. 
Debug.Assert(targetMethod.OwningType.IsValueType); MethodDesc targetOfLookup; if (_constrained.IsRuntimeDeterminedType) targetOfLookup = _compilation.TypeSystemContext.GetMethodForRuntimeDeterminedType(targetMethod.GetTypicalMethodDefinition(), (RuntimeDeterminedType)_constrained); else targetOfLookup = _compilation.TypeSystemContext.GetMethodForInstantiatedType(targetMethod.GetTypicalMethodDefinition(), (InstantiatedType)_constrained); if (targetOfLookup.HasInstantiation) { targetOfLookup = targetOfLookup.MakeInstantiatedMethod(runtimeDeterminedMethod.Instantiation); } Debug.Assert(targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific) == targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific)); _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodEntry, targetOfLookup), reason); } else if (directCall) { bool referencingArrayAddressMethod = false; if (targetMethod.IsIntrinsic) { // If this is an intrinsic method with a callsite-specific expansion, this will replace // the method with a method the intrinsic expands into. If it's not the special intrinsic, // method stays unchanged. targetMethod = _compilation.ExpandIntrinsicForCallsite(targetMethod, _canonMethod); // Array address method requires special dependency tracking. referencingArrayAddressMethod = targetMethod.IsArrayAddressMethod(); } MethodDesc concreteMethod = targetMethod; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (targetMethod.IsConstructor && targetMethod.OwningType.IsString) { _dependencies.Add(_factory.StringAllocator(targetMethod), reason); } else if (exactContextNeedsRuntimeLookup) { if (targetMethod.IsSharedByGenericInstantiations && !resolvedConstraint && !referencingArrayAddressMethod) { ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = GetGenericLookupHelper(ReadyToRunHelperId.MethodDictionary, runtimeDeterminedMethod); } else if (targetMethod.RequiresInstMethodTableArg()) { bool hasHiddenParameter = true; if (targetMethod.IsIntrinsic) { if (_factory.TypeSystemContext.IsSpecialUnboxingThunkTargetMethod(targetMethod)) hasHiddenParameter = false; } if (hasHiddenParameter) instParam = GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType); } if (instParam != null) { _dependencies.Add(instParam, reason); } if (instParam == null && !targetMethod.OwningType.IsValueType && !_factory.TypeSystemContext.IsSpecialUnboxingThunk(_canonMethod)) { // We have a call to a shared instance method and we're already in a shared context. // e.g. this is a call to Foo<T>.Method() and we're about to add Foo<__Canon>.Method() // to the dependency graph). // // We will pretend the runtime determined owning type (Foo<T>) got allocated as well. // This is because RyuJIT might end up inlining the shared method body, making it concrete again, // without actually having to go through a dictionary. // (This would require inlining across two generic contexts, but RyuJIT does that.) // // If we didn't have a constructed type for this at the scanning time, we wouldn't // know the dictionary dependencies at the inlined site, leading to a compile failure. // (Remember that dictionary dependencies of instance methods on generic reference types // are tied to the owning type.) // // This is not ideal, because if e.g. Foo<string> never got allocated otherwise, this code is // unreachable and we're making the scanner scan more of it. 
// // Technically, we could get away with injecting a RuntimeDeterminedMethodNode here // but that introduces more complexities and doesn't seem worth it at this time. Debug.Assert(targetMethod.AcquiresInstMethodTableFromThis()); _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType), reason + " - inlining protection"); } _dependencies.Add(_factory.CanonicalEntrypoint(targetMethod), reason); } else { Debug.Assert(!forceUseRuntimeLookup); _dependencies.Add(GetMethodEntrypoint(targetMethod), reason); if (targetMethod.RequiresInstMethodTableArg() && resolvedConstraint) { if (_constrained.IsRuntimeDeterminedSubtype) _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, _constrained), reason); else _dependencies.Add(_factory.ConstructedTypeSymbol(_constrained), reason); } if (referencingArrayAddressMethod && !_isReadOnly) { // Address method is special - it expects an instantiation argument, unless a readonly prefix was applied. _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType), reason); } } } else { ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = _compilation.NodeFactory.MethodGenericDictionary(concreteMethod); } else if (targetMethod.RequiresInstMethodTableArg() || (referencingArrayAddressMethod && !_isReadOnly)) { // Ask for a constructed type symbol because we need the vtable to get to the dictionary instParam = _compilation.NodeFactory.ConstructedTypeSymbol(concreteMethod.OwningType); } if (instParam != null) { _dependencies.Add(instParam, reason); } if (instParam == null && concreteMethod != targetMethod && targetMethod.OwningType.NormalizeInstantiation() == targetMethod.OwningType && !targetMethod.OwningType.IsValueType) { // We have a call to a shared instance method and we still know the concrete // type of the generic instance (e.g. this is a call to Foo<string>.Method() // and we're about to add Foo<__Canon>.Method() to the dependency graph). // // We will pretend the concrete type got allocated as well. This is because RyuJIT might // end up inlining the shared method body, making it concrete again. // // If we didn't have a constructed type for this at the scanning time, we wouldn't // know the dictionary dependencies at the inlined site, leading to a compile failure. // (Remember that dictionary dependencies of instance methods on generic reference types // are tied to the owning type.) // // This is not ideal, because if Foo<string> never got allocated otherwise, this code is // unreachable and we're making the scanner scan more of it. // // Technically, we could get away with injecting a ShadowConcreteMethod for the concrete // method, but that's more complex and doesn't seem worth it at this time. 
Debug.Assert(targetMethod.AcquiresInstMethodTableFromThis()); _dependencies.Add(_compilation.NodeFactory.MaximallyConstructableType(concreteMethod.OwningType), reason + " - inlining protection"); } _dependencies.Add(GetMethodEntrypoint(targetMethod), reason); } } else if (method.HasInstantiation) { // Generic virtual method call MethodDesc methodToLookup = _compilation.GetTargetOfGenericVirtualMethodCall(runtimeDeterminedMethod); _compilation.DetectGenericCycles( _canonMethod, methodToLookup.GetCanonMethodTarget(CanonicalFormKind.Specific)); if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodHandle, methodToLookup), reason); } else { _dependencies.Add(_factory.RuntimeMethodHandle(methodToLookup), reason); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GVMLookupForSlot), reason); } else if (method.OwningType.IsInterface) { if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.VirtualDispatchCell, runtimeDeterminedMethod), reason); } else { _dependencies.Add(_factory.InterfaceDispatchCell(method), reason); } } else if (_compilation.HasFixedSlotVTable(method.OwningType)) { // No dependencies: virtual call through the vtable } else { MethodDesc slotDefiningMethod = targetMethod.IsNewSlot ? targetMethod : MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethod); _dependencies.Add(_factory.VirtualMethodUse(slotDefiningMethod), reason); } } private void ImportLdFtn(int token, ILOpcode opCode) { // Is this a verifiable delegate creation? If so, we will handle it when we reach the newobj if (_ilBytes[_currentOffset] == (byte)ILOpcode.newobj) { int delegateToken = ReadILTokenAt(_currentOffset + 1); var delegateType = ((MethodDesc)_methodIL.GetObject(delegateToken)).OwningType; if (delegateType.IsDelegate) return; } ImportCall(opCode, token); } private void ImportBranch(ILOpcode opcode, BasicBlock target, BasicBlock fallthrough) { ImportFallthrough(target); if (fallthrough != null) ImportFallthrough(fallthrough); } private void ImportSwitchJump(int jmpBase, int[] jmpDelta, BasicBlock fallthrough) { for (int i = 0; i < jmpDelta.Length; i++) { BasicBlock target = _basicBlocks[jmpBase + jmpDelta[i]]; ImportFallthrough(target); } if (fallthrough != null) ImportFallthrough(fallthrough); } private void ImportUnbox(int token, ILOpcode opCode) { TypeDesc type = (TypeDesc)_methodIL.GetObject(token); if (!type.IsValueType) { if (opCode == ILOpcode.unbox_any) { // When applied to a reference type, unbox_any has the same effect as castclass. 
ImportCasting(ILOpcode.castclass, token); } return; } if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), "Unbox"); } else { _dependencies.Add(_factory.NecessaryTypeSymbol(type), "Unbox"); } ReadyToRunHelper helper; if (opCode == ILOpcode.unbox) { helper = ReadyToRunHelper.Unbox; } else { Debug.Assert(opCode == ILOpcode.unbox_any); helper = ReadyToRunHelper.Unbox_Nullable; } _dependencies.Add(GetHelperEntrypoint(helper), "Unbox"); } private void ImportRefAnyVal(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRefAny), "refanyval"); } private void ImportMkRefAny(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.TypeHandleToRuntimeType), "mkrefany"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.TypeHandleToRuntimeTypeHandle), "mkrefany"); } private void ImportLdToken(int token) { object obj = _methodIL.GetObject(token); if (obj is TypeDesc) { // If this is a ldtoken Type / Type.GetTypeFromHandle sequence, we need one more helper. // We might also be able to optimize this a little if this is a ldtoken/GetTypeFromHandle/Equals sequence. bool isTypeEquals = false; BasicBlock nextBasicBlock = _basicBlocks[_currentOffset]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset] == ILOpcode.call) { int methodToken = ReadILTokenAt(_currentOffset + 1); var method = (MethodDesc)_methodIL.GetObject(methodToken); if (IsTypeGetTypeFromHandle(method)) { // Codegen will swap this one for GetRuntimeTypeHandle when optimizing _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeType), "ldtoken"); // Is the next instruction a call to Type::Equals? nextBasicBlock = _basicBlocks[_currentOffset + 5]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset + 5] == ILOpcode.call) { methodToken = ReadILTokenAt(_currentOffset + 6); method = (MethodDesc)_methodIL.GetObject(methodToken); isTypeEquals = IsTypeEquals(method); } } } } } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeTypeHandle), "ldtoken"); var type = (TypeDesc)obj; ISymbolNode reference; if (type.IsRuntimeDeterminedSubtype) { reference = GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type); } else { reference = _compilation.ComputeConstantLookup( isTypeEquals ? ReadyToRunHelperId.NecessaryTypeHandle : _compilation.GetLdTokenHelperForType(type), type); } _dependencies.Add(reference, "ldtoken"); } else if (obj is MethodDesc) { var method = (MethodDesc)obj; if (method.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodHandle, method), "ldtoken"); } else { _dependencies.Add(_factory.RuntimeMethodHandle(method), "ldtoken"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeMethodHandle), "ldtoken"); } else { Debug.Assert(obj is FieldDesc); // First check if this is a ldtoken Field followed by InitializeArray or CreateSpan. BasicBlock nextBasicBlock = _basicBlocks[_currentOffset]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset] == ILOpcode.call) { int methodToken = ReadILTokenAt(_currentOffset + 1); var method = (MethodDesc)_methodIL.GetObject(methodToken); if (IsRuntimeHelpersInitializeArrayOrCreateSpan(method)) { // Codegen expands this and doesn't do the normal ldtoken. 
return; } } } var field = (FieldDesc)obj; if (field.OwningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.FieldHandle, field), "ldtoken"); } else { _dependencies.Add(_factory.RuntimeFieldHandle(field), "ldtoken"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeFieldHandle), "ldtoken"); } } private void ImportRefAnyType() { // TODO } private void ImportArgList() { } private void ImportConstrainedPrefix(int token) { _constrained = (TypeDesc)_methodIL.GetObject(token); } private void ImportReadOnlyPrefix() { _isReadOnly = true; } private void ImportFieldAccess(int token, bool isStatic, string reason) { var field = (FieldDesc)_methodIL.GetObject(token); _compilation.NodeFactory.MetadataManager.GetDependenciesDueToAccess(ref _dependencies, _compilation.NodeFactory, _canonMethodIL, field); // Covers both ldsfld/ldsflda and ldfld/ldflda with a static field if (isStatic || field.IsStatic) { // ldsfld/ldsflda with an instance field is invalid IL if (isStatic && !field.IsStatic) ThrowHelper.ThrowInvalidProgramException(); // References to literal fields from IL body should never resolve. // The CLR would throw a MissingFieldException while jitting and so should we. if (field.IsLiteral) ThrowHelper.ThrowMissingFieldException(field.OwningType, field.Name); if (field.HasRva) { // We don't care about field RVA data for the usual cases, but if this is one of the // magic fields the compiler synthetized, the data blob might bring more dependencies // and we need to scan those. _dependencies.Add(_compilation.GetFieldRvaData(field), reason); // TODO: lazy cctor dependency return; } ReadyToRunHelperId helperId; if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else if (field.HasGCStaticBase) { helperId = ReadyToRunHelperId.GetGCStaticBase; } else { helperId = ReadyToRunHelperId.GetNonGCStaticBase; } TypeDesc owningType = field.OwningType; if (owningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(helperId, owningType), reason); } else { _dependencies.Add(_factory.ReadyToRunHelper(helperId, owningType), reason); } } } private void ImportLoadField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "ldsfld" : "ldfld"); } private void ImportAddressOfField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "ldsflda" : "ldflda"); } private void ImportStoreField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "stsfld" : "stfld"); } private void ImportLoadString(int token) { // If we care, this can include allocating the frozen string node. _dependencies.Add(_factory.SerializedStringObject(""), "ldstr"); } private void ImportBox(int token) { AddBoxingDependencies((TypeDesc)_methodIL.GetObject(token), "Box"); } private void AddBoxingDependencies(TypeDesc type, string reason) { Debug.Assert(!type.IsCanonicalSubtype(CanonicalFormKind.Any)); // Generic code will have BOX instructions when referring to T - the instruction is a no-op // if the substitution wasn't a value type. 
if (!type.IsValueType) return; if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(type), reason); } if (type.IsNullable) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Box), reason); } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Box_Nullable), reason); } } private void ImportLeave(BasicBlock target) { ImportFallthrough(target); } private void ImportNewArray(int token) { var type = ((TypeDesc)_methodIL.GetObject(token)).MakeArrayType(); if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), "newarr"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewArray), "newarr"); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(type), "newarr"); } } private void ImportLoadElement(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelem"); } private void ImportLoadElement(TypeDesc elementType) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelem"); } private void ImportStoreElement(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "stelem"); } private void ImportStoreElement(TypeDesc elementType) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "stelem"); } private void ImportAddressOfElement(int token) { TypeDesc elementType = (TypeDesc)_methodIL.GetObject(token); if (elementType.IsGCPointer && !_isReadOnly) { if (elementType.IsRuntimeDeterminedSubtype) _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, elementType), "ldelema"); else _dependencies.Add(_factory.NecessaryTypeSymbol(elementType), "ldelema"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelema"); } private void ImportBinaryOperation(ILOpcode opcode) { switch (opcode) { case ILOpcode.add_ovf: case ILOpcode.add_ovf_un: case ILOpcode.sub_ovf: case ILOpcode.sub_ovf_un: _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Overflow), "_ovf"); break; case ILOpcode.mul_ovf: case ILOpcode.mul_ovf_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LMulOfv), "_lmulovf"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULMulOvf), "_ulmulovf"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Overflow), "_ovf"); break; case ILOpcode.div: case ILOpcode.div_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULDiv), "_uldiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LDiv), "_ldiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.UDiv), "_udiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Div), "_div"); } else if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM64) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ThrowDivZero), "_divbyzero"); } break; case ILOpcode.rem: case ILOpcode.rem_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULMod), "_ulmod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LMod), "_lmod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.UMod), "_umod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Mod), "_mod"); } else if 
(_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM64) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ThrowDivZero), "_divbyzero"); } break; } } private void ImportFallthrough(BasicBlock next) { MarkBasicBlock(next); } private int ReadILTokenAt(int ilOffset) { return (int)(_ilBytes[ilOffset] + (_ilBytes[ilOffset + 1] << 8) + (_ilBytes[ilOffset + 2] << 16) + (_ilBytes[ilOffset + 3] << 24)); } private void ReportInvalidBranchTarget(int targetOffset) { ThrowHelper.ThrowInvalidProgramException(); } private void ReportFallthroughAtEndOfMethod() { ThrowHelper.ThrowInvalidProgramException(); } private void ReportMethodEndInsideInstruction() { ThrowHelper.ThrowInvalidProgramException(); } private void ReportInvalidInstruction(ILOpcode opcode) { ThrowHelper.ThrowInvalidProgramException(); } private bool IsRuntimeHelpersInitializeArrayOrCreateSpan(MethodDesc method) { if (method.IsIntrinsic) { string name = method.Name; if (name == "InitializeArray" || name == "CreateSpan") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "RuntimeHelpers" && owningType.Namespace == "System.Runtime.CompilerServices"; } } } return false; } private bool IsTypeGetTypeFromHandle(MethodDesc method) { if (method.IsIntrinsic && method.Name == "GetTypeFromHandle") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Type" && owningType.Namespace == "System"; } } return false; } private bool IsTypeEquals(MethodDesc method) { if (method.IsIntrinsic && method.Name == "op_Equality") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Type" && owningType.Namespace == "System"; } } return false; } private bool IsActivatorDefaultConstructorOf(MethodDesc method) { if (method.IsIntrinsic && method.Name == "DefaultConstructorOf" && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Activator" && owningType.Namespace == "System"; } } return false; } private bool IsActivatorAllocatorOf(MethodDesc method) { if (method.IsIntrinsic && method.Name == "AllocatorOf" && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Activator" && owningType.Namespace == "System"; } } return false; } private bool IsEETypePtrOf(MethodDesc method) { if (method.IsIntrinsic && (method.Name == "EETypePtrOf" || method.Name == "MethodTableOf") && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return (owningType.Name == "EETypePtr" && owningType.Namespace == "System") || (owningType.Name == "Object" && owningType.Namespace == "System"); } } return false; } private TypeDesc GetWellKnownType(WellKnownType wellKnownType) { return _compilation.TypeSystemContext.GetWellKnownType(wellKnownType); } private void ImportNop() { } private void ImportBreak() { } private void ImportLoadVar(int index, bool argument) { } private void ImportStoreVar(int index, bool argument) { } private void ImportAddressOfVar(int index, bool argument) { } private void ImportDup() { } private void ImportPop() { } private void ImportCalli(int token) { } private void ImportLoadNull() { } private void ImportReturn() { } private void ImportLoadInt(long value, StackValueKind kind) { } private void 
ImportLoadFloat(double value) { } private void ImportLoadIndirect(int token) { } private void ImportLoadIndirect(TypeDesc type) { } private void ImportStoreIndirect(int token) { } private void ImportStoreIndirect(TypeDesc type) { } private void ImportShiftOperation(ILOpcode opcode) { } private void ImportCompareOperation(ILOpcode opcode) { } private void ImportConvert(WellKnownType wellKnownType, bool checkOverflow, bool unsigned) { } private void ImportUnaryOperation(ILOpcode opCode) { } private void ImportCpOpj(int token) { } private void ImportCkFinite() { } private void ImportLocalAlloc() { } private void ImportEndFilter() { } private void ImportCpBlk() { } private void ImportInitBlk() { } private void ImportRethrow() { } private void ImportSizeOf(int token) { } private void ImportUnalignedPrefix(byte alignment) { } private void ImportVolatilePrefix() { } private void ImportTailPrefix() { } private void ImportNoPrefix(byte mask) { } private void ImportThrow() { } private void ImportInitObj(int token) { } private void ImportLoadLength() { } private void ImportEndFinally() { } } }
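The before-snapshot of the scanner ends above; the after-snapshot follows. For intuition about what `TryResolveConstraintMethodApprox` computes symbolically - mapping an interface method plus a constrained type to the concrete implementation - here is a loose reflection-based analogy. It is a sketch only: it assumes a .NET 7+ runtime where `Type.GetInterfaceMap` covers static virtual members, and `IHasName`/`Widget` are invented names.

using System;
using System.Reflection;

interface IHasName
{
    static abstract string Name { get; }
}

struct Widget : IHasName
{
    public static string Name => "Widget";
}

static class ConstraintResolutionSketch
{
    // Loose analogy to compile-time constraint resolution: find the target
    // method a constrained type supplies for a given interface method.
    static MethodInfo ResolveConstrained(Type constrained, MethodInfo interfaceMethod)
    {
        InterfaceMapping map = constrained.GetInterfaceMap(interfaceMethod.DeclaringType!);
        for (int i = 0; i < map.InterfaceMethods.Length; i++)
            if (map.InterfaceMethods[i] == interfaceMethod)
                return map.TargetMethods[i];
        throw new MissingMethodException(interfaceMethod.Name);
    }

    static void Main()
    {
        MethodInfo getter = typeof(IHasName).GetMethod("get_Name")!;
        MethodInfo target = ResolveConstrained(typeof(Widget), getter);
        Console.WriteLine(target.Invoke(null, null));   // prints "Widget"
    }
}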
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.ReadyToRunConstants; using ILCompiler; using ILCompiler.DependencyAnalysis; using Debug = System.Diagnostics.Debug; using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList; namespace Internal.IL { // Implements an IL scanner that scans method bodies to be compiled by the code generation // backend before the actual compilation happens to gain insights into the code. partial class ILImporter { private readonly MethodIL _methodIL; private readonly MethodIL _canonMethodIL; private readonly ILScanner _compilation; private readonly ILScanNodeFactory _factory; // True if we're scanning a throwing method body because scanning the real body failed. private readonly bool _isFallbackBodyCompilation; private readonly MethodDesc _canonMethod; private DependencyList _dependencies = new DependencyList(); private readonly byte[] _ilBytes; private class BasicBlock { // Common fields public enum ImportState : byte { Unmarked, IsPending } public BasicBlock Next; public int StartOffset; public ImportState State = ImportState.Unmarked; public bool TryStart; public bool FilterStart; public bool HandlerStart; } private bool _isReadOnly; private TypeDesc _constrained; private int _currentInstructionOffset; private int _previousInstructionOffset; private class ExceptionRegion { public ILExceptionRegion ILRegion; } private ExceptionRegion[] _exceptionRegions; public ILImporter(ILScanner compilation, MethodDesc method, MethodIL methodIL = null) { if (methodIL == null) { methodIL = compilation.GetMethodIL(method); } else { _isFallbackBodyCompilation = true; } // This is e.g. an "extern" method in C# without a DllImport or InternalCall. if (methodIL == null) { ThrowHelper.ThrowInvalidProgramException(ExceptionStringID.InvalidProgramSpecific, method); } _compilation = compilation; _factory = (ILScanNodeFactory)compilation.NodeFactory; _ilBytes = methodIL.GetILBytes(); _canonMethodIL = methodIL; // Get the runtime determined method IL so that this works right in shared code // and tokens in shared code resolve to runtime determined types. MethodIL uninstantiatiedMethodIL = methodIL.GetMethodILDefinition(); if (methodIL != uninstantiatiedMethodIL) { MethodDesc sharedMethod = method.GetSharedRuntimeFormMethodTarget(); _methodIL = new InstantiatedMethodIL(sharedMethod, uninstantiatiedMethodIL); } else { _methodIL = methodIL; } _canonMethod = method; var ilExceptionRegions = methodIL.GetExceptionRegions(); _exceptionRegions = new ExceptionRegion[ilExceptionRegions.Length]; for (int i = 0; i < ilExceptionRegions.Length; i++) { _exceptionRegions[i] = new ExceptionRegion() { ILRegion = ilExceptionRegions[i] }; } } public DependencyList Import() { TypeDesc owningType = _canonMethod.OwningType; if (_compilation.HasLazyStaticConstructor(owningType)) { // Don't trigger cctor if this is a fallback compilation (bad cctor could have been the reason for fallback). // Otherwise follow the rules from ECMA-335 I.8.9.5. if (!_isFallbackBodyCompilation && (_canonMethod.Signature.IsStatic || _canonMethod.IsConstructor || owningType.IsValueType)) { // For beforefieldinit, we can wait for field access. 
if (!((MetadataType)owningType).IsBeforeFieldInit) { MethodDesc method = _methodIL.OwningMethod; if (method.OwningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.GetNonGCStaticBase, method.OwningType), "Owning type cctor"); } else { _dependencies.Add(_factory.ReadyToRunHelper(ReadyToRunHelperId.GetNonGCStaticBase, method.OwningType), "Owning type cctor"); } } } } if (_canonMethod.IsSynchronized) { const string reason = "Synchronized method"; if (_canonMethod.Signature.IsStatic) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorEnterStatic), reason); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorExitStatic), reason); } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorEnter), reason); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.MonitorExit), reason); } } FindBasicBlocks(); ImportBasicBlocks(); CodeBasedDependencyAlgorithm.AddDependenciesDueToMethodCodePresence(ref _dependencies, _factory, _canonMethod, _canonMethodIL); return _dependencies; } private ISymbolNode GetGenericLookupHelper(ReadyToRunHelperId helperId, object helperArgument) { GenericDictionaryLookup lookup = _compilation.ComputeGenericLookup(_canonMethod, helperId, helperArgument); Debug.Assert(lookup.UseHelper); if (_canonMethod.RequiresInstMethodDescArg()) { return _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup(lookup.HelperId, lookup.HelperObject, _canonMethod); } else { Debug.Assert(_canonMethod.RequiresInstArg() || _canonMethod.AcquiresInstMethodTableFromThis()); return _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup(lookup.HelperId, lookup.HelperObject, _canonMethod.OwningType); } } private ISymbolNode GetHelperEntrypoint(ReadyToRunHelper helper) { return _compilation.GetHelperEntrypoint(helper); } private void MarkInstructionBoundary() { } private void EndImportingBasicBlock(BasicBlock basicBlock) { } private void StartImportingBasicBlock(BasicBlock basicBlock) { // Import all associated EH regions foreach (ExceptionRegion ehRegion in _exceptionRegions) { ILExceptionRegion region = ehRegion.ILRegion; if (region.TryOffset == basicBlock.StartOffset) { MarkBasicBlock(_basicBlocks[region.HandlerOffset]); if (region.Kind == ILExceptionRegionKind.Filter) MarkBasicBlock(_basicBlocks[region.FilterOffset]); // Once https://github.com/dotnet/corert/issues/3460 is done, this should be deleted. // Throwing InvalidProgram is not great, but we want to do *something* if this happens // because doing nothing means problems at runtime. This is not worth piping a // a new exception with a fancy message for. if (region.Kind == ILExceptionRegionKind.Catch) { TypeDesc catchType = (TypeDesc)_methodIL.GetObject(region.ClassToken); if (catchType.IsRuntimeDeterminedSubtype) ThrowHelper.ThrowInvalidProgramException(); } } } _currentInstructionOffset = -1; _previousInstructionOffset = -1; } private void StartImportingInstruction() { _previousInstructionOffset = _currentInstructionOffset; _currentInstructionOffset = _currentOffset; } private void EndImportingInstruction() { // The instruction should have consumed any prefixes. _constrained = null; _isReadOnly = false; } private void ImportJmp(int token) { // JMP is kind of like a tail call (with no arguments pushed on the stack). 
ImportCall(ILOpcode.call, token); } private void ImportCasting(ILOpcode opcode, int token) { TypeDesc type = (TypeDesc)_methodIL.GetObject(token); if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandleForCasting, type), "IsInst/CastClass"); } else { _dependencies.Add(_compilation.ComputeConstantLookup(ReadyToRunHelperId.TypeHandleForCasting, type), "IsInst/CastClass"); } } private IMethodNode GetMethodEntrypoint(MethodDesc method) { if (method.HasInstantiation || method.OwningType.HasInstantiation) { _compilation.DetectGenericCycles(_canonMethod, method); } return _factory.MethodEntrypoint(method); } private void ImportCall(ILOpcode opcode, int token) { // We get both the canonical and runtime determined form - JitInterface mostly operates // on the canonical form. var runtimeDeterminedMethod = (MethodDesc)_methodIL.GetObject(token); var method = (MethodDesc)_canonMethodIL.GetObject(token); _compilation.NodeFactory.MetadataManager.GetDependenciesDueToAccess(ref _dependencies, _compilation.NodeFactory, _canonMethodIL, method); if (method.IsRawPInvoke()) { // Raw P/invokes don't have any dependencies. return; } string reason = null; switch (opcode) { case ILOpcode.newobj: reason = "newobj"; break; case ILOpcode.call: reason = "call"; break; case ILOpcode.callvirt: reason = "callvirt"; break; case ILOpcode.ldftn: reason = "ldftn"; break; case ILOpcode.ldvirtftn: reason = "ldvirtftn"; break; default: Debug.Assert(false); break; } if (opcode == ILOpcode.newobj) { TypeDesc owningType = runtimeDeterminedMethod.OwningType; if (owningType.IsString) { // String .ctor handled specially below } else if (owningType.IsGCPointer) { if (owningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, owningType), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(owningType), reason); } if (owningType.IsArray) { // RyuJIT is going to call the "MdArray" creation helper even if this is an SzArray, // hence the IsArray check above. Note that the MdArray helper can handle SzArrays. _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewMultiDimArr), reason); return; } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewObject), reason); } } if (owningType.IsDelegate) { // If this is a verifiable delegate construction sequence, the previous instruction is a ldftn/ldvirtftn if (_previousInstructionOffset >= 0 && _ilBytes[_previousInstructionOffset] == (byte)ILOpcode.prefix1) { // TODO: for ldvirtftn we need to also check for the `dup` instruction, otherwise this is a normal newobj. 
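// The verifiable construction sequence being matched here is, roughly: "ldftn void C::M()" (or: "dup; ldvirtftn void C::M()") followed by "newobj instance void D::.ctor(object, native int)".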
ILOpcode previousOpcode = (ILOpcode)(0x100 + _ilBytes[_previousInstructionOffset + 1]); if (previousOpcode == ILOpcode.ldvirtftn || previousOpcode == ILOpcode.ldftn) { int delTargetToken = ReadILTokenAt(_previousInstructionOffset + 2); var delTargetMethod = (MethodDesc)_methodIL.GetObject(delTargetToken); TypeDesc canonDelegateType = method.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); DelegateCreationInfo info = _compilation.GetDelegateCtor(canonDelegateType, delTargetMethod, previousOpcode == ILOpcode.ldvirtftn); if (info.NeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.DelegateCtor, info), reason); } else { _dependencies.Add(_factory.ReadyToRunHelper(ReadyToRunHelperId.DelegateCtor, info), reason); } return; } } } } if (method.OwningType.IsDelegate && method.Name == "Invoke" && opcode != ILOpcode.ldftn && opcode != ILOpcode.ldvirtftn) { // This call is expanded as an intrinsic; it's not an actual function call. // Before codegen realizes this is an intrinsic, it might still ask questions about // the vtable of this virtual method, so let's make sure it's marked in the scanner's // dependency graph. _dependencies.Add(_factory.VTable(method.OwningType), reason); return; } if (method.IsIntrinsic) { if (IsRuntimeHelpersInitializeArrayOrCreateSpan(method)) { if (_previousInstructionOffset >= 0 && _ilBytes[_previousInstructionOffset] == (byte)ILOpcode.ldtoken) return; } if (IsActivatorDefaultConstructorOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.DefaultConstructor, runtimeDeterminedMethod.Instantiation[0]), reason); } else { MethodDesc ctor = Compilation.GetConstructorForCreateInstanceIntrinsic(method.Instantiation[0]); _dependencies.Add(_factory.CanonicalEntrypoint(ctor), reason); } return; } if (IsActivatorAllocatorOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.ObjectAllocator, runtimeDeterminedMethod.Instantiation[0]), reason); } else { _dependencies.Add(_compilation.ComputeConstantLookup(ReadyToRunHelperId.ObjectAllocator, method.Instantiation[0]), reason); } return; } if (method.OwningType.IsByReferenceOfT && (method.IsConstructor || method.Name == "get_Value")) { return; } if (IsEETypePtrOf(method)) { if (runtimeDeterminedMethod.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.Instantiation[0]), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(method.Instantiation[0]), reason); } return; } } TypeDesc exactType = method.OwningType; bool resolvedConstraint = false; bool forceUseRuntimeLookup = false; MethodDesc methodAfterConstraintResolution = method; if (_constrained != null) { // We have a "constrained." call. Try a partial resolve of the constraint call. Note that this // will not necessarily resolve the call exactly, since we might be compiling // shared generic code - it may just resolve it to a candidate suitable for // JIT compilation, and require a runtime lookup for the actual code pointer // to call. 
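// For example, C# code like "void M<T>(T t) where T : IDisposable => t.Dispose();" compiles to "constrained. !!T callvirt IDisposable::Dispose()": on a valuetype T the constraint can resolve to a direct call; otherwise `this` gets boxed or a runtime lookup is required.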
TypeDesc constrained = _constrained; if (constrained.IsRuntimeDeterminedSubtype) constrained = constrained.ConvertToCanonForm(CanonicalFormKind.Specific); MethodDesc directMethod = constrained.GetClosestDefType().TryResolveConstraintMethodApprox(method.OwningType, method, out forceUseRuntimeLookup); if (directMethod == null && constrained.IsEnum) { // Constrained calls to methods on enums resolve to System.Enum's methods. System.Enum is a reference // type though, so we would fail to resolve and box. We have a special path for those to avoid boxing. directMethod = _compilation.TypeSystemContext.TryResolveConstrainedEnumMethod(constrained, method); } if (directMethod != null) { // Either // 1. no constraint resolution at compile time (!directMethod) // OR 2. no code sharing lookup in call // OR 3. we have resolved to an instantiating stub methodAfterConstraintResolution = directMethod; Debug.Assert(!methodAfterConstraintResolution.OwningType.IsInterface); resolvedConstraint = true; exactType = constrained; } else if (method.Signature.IsStatic) { Debug.Assert(method.OwningType.IsInterface); exactType = constrained; } else if (constrained.IsValueType) { // We'll need to box `this`. Note we use _constrained here, because the other one is canonical. AddBoxingDependencies(_constrained, reason); } } MethodDesc targetMethod = methodAfterConstraintResolution; bool exactContextNeedsRuntimeLookup; if (targetMethod.HasInstantiation) { exactContextNeedsRuntimeLookup = targetMethod.IsSharedByGenericInstantiations; } else { exactContextNeedsRuntimeLookup = exactType.IsCanonicalSubtype(CanonicalFormKind.Any); } // // Determine whether to perform direct call // bool directCall = false; if (targetMethod.Signature.IsStatic) { if (_constrained != null && (!resolvedConstraint || forceUseRuntimeLookup)) { // Constrained call to static virtual interface method we didn't resolve statically Debug.Assert(targetMethod.IsVirtual && targetMethod.OwningType.IsInterface); } else { // Static methods are always direct calls directCall = true; } } else if ((opcode != ILOpcode.callvirt && opcode != ILOpcode.ldvirtftn) || resolvedConstraint) { directCall = true; } else { if (!targetMethod.IsVirtual || // Final/sealed has no meaning for interfaces, but lets us devirtualize otherwise (!targetMethod.OwningType.IsInterface && (targetMethod.IsFinal || targetMethod.OwningType.IsSealed()))) { directCall = true; } } if (directCall && targetMethod.IsAbstract) { ThrowHelper.ThrowBadImageFormatException(); } bool allowInstParam = opcode != ILOpcode.ldvirtftn && opcode != ILOpcode.ldftn; if (directCall && !allowInstParam && targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific).RequiresInstArg()) { // Needs a single address to call this method but the method needs a hidden argument. // We need a fat function pointer for this that captures both things. if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodEntry, runtimeDeterminedMethod), reason); } else { _dependencies.Add(_factory.FatFunctionPointer(runtimeDeterminedMethod), reason); } } else if (directCall && resolvedConstraint && exactContextNeedsRuntimeLookup) { // We want to do a direct call to a shared method on a valuetype. We need to provide // a generic context, but the JitInterface doesn't provide a way for us to do it from here. // So we do the next best thing and ask RyuJIT to look up a fat pointer. // // We have the canonical version of the method - find the runtime determined version.
// This is simplified because we know the method is on a valuetype. Debug.Assert(targetMethod.OwningType.IsValueType); MethodDesc targetOfLookup; if (_constrained.IsRuntimeDeterminedType) targetOfLookup = _compilation.TypeSystemContext.GetMethodForRuntimeDeterminedType(targetMethod.GetTypicalMethodDefinition(), (RuntimeDeterminedType)_constrained); else if (_constrained.HasInstantiation) targetOfLookup = _compilation.TypeSystemContext.GetMethodForInstantiatedType(targetMethod.GetTypicalMethodDefinition(), (InstantiatedType)_constrained); else targetOfLookup = targetMethod.GetMethodDefinition(); if (targetOfLookup.HasInstantiation) { targetOfLookup = targetOfLookup.MakeInstantiatedMethod(runtimeDeterminedMethod.Instantiation); } Debug.Assert(targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific) == targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific)); _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodEntry, targetOfLookup), reason); } else if (directCall) { bool referencingArrayAddressMethod = false; if (targetMethod.IsIntrinsic) { // If this is an intrinsic method with a callsite-specific expansion, this will replace // the method with a method the intrinsic expands into. If it's not the special intrinsic, // the method stays unchanged. targetMethod = _compilation.ExpandIntrinsicForCallsite(targetMethod, _canonMethod); // Array address method requires special dependency tracking. referencingArrayAddressMethod = targetMethod.IsArrayAddressMethod(); } MethodDesc concreteMethod = targetMethod; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); if (targetMethod.IsConstructor && targetMethod.OwningType.IsString) { _dependencies.Add(_factory.StringAllocator(targetMethod), reason); } else if (exactContextNeedsRuntimeLookup) { if (targetMethod.IsSharedByGenericInstantiations && !resolvedConstraint && !referencingArrayAddressMethod) { ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = GetGenericLookupHelper(ReadyToRunHelperId.MethodDictionary, runtimeDeterminedMethod); } else if (targetMethod.RequiresInstMethodTableArg()) { bool hasHiddenParameter = true; if (targetMethod.IsIntrinsic) { if (_factory.TypeSystemContext.IsSpecialUnboxingThunkTargetMethod(targetMethod)) hasHiddenParameter = false; } if (hasHiddenParameter) instParam = GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType); } if (instParam != null) { _dependencies.Add(instParam, reason); } if (instParam == null && !targetMethod.OwningType.IsValueType && !_factory.TypeSystemContext.IsSpecialUnboxingThunk(_canonMethod)) { // We have a call to a shared instance method and we're already in a shared context // (e.g. this is a call to Foo<T>.Method() and we're about to add Foo<__Canon>.Method() // to the dependency graph). // // We will pretend the runtime determined owning type (Foo<T>) got allocated as well. // This is because RyuJIT might end up inlining the shared method body, making it concrete again, // without actually having to go through a dictionary. // (This would require inlining across two generic contexts, but RyuJIT does that.) // // If we didn't have a constructed type for this at the scanning time, we wouldn't // know the dictionary dependencies at the inlined site, leading to a compile failure. // (Remember that dictionary dependencies of instance methods on generic reference types // are tied to the owning type.) // // This is not ideal, because if e.g.
Foo<string> never got allocated otherwise, this code is // unreachable and we're making the scanner scan more of it. // // Technically, we could get away with injecting a RuntimeDeterminedMethodNode here // but that introduces more complexities and doesn't seem worth it at this time. Debug.Assert(targetMethod.AcquiresInstMethodTableFromThis()); _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType), reason + " - inlining protection"); } _dependencies.Add(_factory.CanonicalEntrypoint(targetMethod), reason); } else { Debug.Assert(!forceUseRuntimeLookup); _dependencies.Add(GetMethodEntrypoint(targetMethod), reason); if (targetMethod.RequiresInstMethodTableArg() && resolvedConstraint) { if (_constrained.IsRuntimeDeterminedSubtype) _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, _constrained), reason); else _dependencies.Add(_factory.ConstructedTypeSymbol(_constrained), reason); } if (referencingArrayAddressMethod && !_isReadOnly) { // Address method is special - it expects an instantiation argument, unless a readonly prefix was applied. _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, runtimeDeterminedMethod.OwningType), reason); } } } else { ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = _compilation.NodeFactory.MethodGenericDictionary(concreteMethod); } else if (targetMethod.RequiresInstMethodTableArg() || (referencingArrayAddressMethod && !_isReadOnly)) { // Ask for a constructed type symbol because we need the vtable to get to the dictionary instParam = _compilation.NodeFactory.ConstructedTypeSymbol(concreteMethod.OwningType); } if (instParam != null) { _dependencies.Add(instParam, reason); } if (instParam == null && concreteMethod != targetMethod && targetMethod.OwningType.NormalizeInstantiation() == targetMethod.OwningType && !targetMethod.OwningType.IsValueType) { // We have a call to a shared instance method and we still know the concrete // type of the generic instance (e.g. this is a call to Foo<string>.Method() // and we're about to add Foo<__Canon>.Method() to the dependency graph). // // We will pretend the concrete type got allocated as well. This is because RyuJIT might // end up inlining the shared method body, making it concrete again. // // If we didn't have a constructed type for this at the scanning time, we wouldn't // know the dictionary dependencies at the inlined site, leading to a compile failure. // (Remember that dictionary dependencies of instance methods on generic reference types // are tied to the owning type.) // // This is not ideal, because if Foo<string> never got allocated otherwise, this code is // unreachable and we're making the scanner scan more of it. // // Technically, we could get away with injecting a ShadowConcreteMethod for the concrete // method, but that's more complex and doesn't seem worth it at this time. Debug.Assert(targetMethod.AcquiresInstMethodTableFromThis()); _dependencies.Add(_compilation.NodeFactory.MaximallyConstructableType(concreteMethod.OwningType), reason + " - inlining protection"); } _dependencies.Add(GetMethodEntrypoint(targetMethod), reason); } } else if (method.Signature.IsStatic) { // This should be an unresolved static virtual interface method call. Other static methods should // have been handled as a directCall above. 
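// (e.g. a "T.Parse(s)" call in shared generic code where T : IParsable<T> is __Canon; the exact implementation is only known at runtime, so it goes through the constrained call lookup helper below. IParsable is only an illustrative example here, not something this code special-cases.)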
Debug.Assert(targetMethod.OwningType.IsInterface && targetMethod.IsVirtual && _constrained != null); var constrainedCallInfo = new ConstrainedCallInfo(_constrained, runtimeDeterminedMethod); _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.ConstrainedDirectCall, constrainedCallInfo), reason); } else if (method.HasInstantiation) { // Generic virtual method call MethodDesc methodToLookup = _compilation.GetTargetOfGenericVirtualMethodCall(runtimeDeterminedMethod); _compilation.DetectGenericCycles( _canonMethod, methodToLookup.GetCanonMethodTarget(CanonicalFormKind.Specific)); if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodHandle, methodToLookup), reason); } else { _dependencies.Add(_factory.RuntimeMethodHandle(methodToLookup), reason); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GVMLookupForSlot), reason); } else if (method.OwningType.IsInterface) { if (exactContextNeedsRuntimeLookup) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.VirtualDispatchCell, runtimeDeterminedMethod), reason); } else { _dependencies.Add(_factory.InterfaceDispatchCell(method), reason); } } else if (_compilation.HasFixedSlotVTable(method.OwningType)) { // No dependencies: virtual call through the vtable } else { MethodDesc slotDefiningMethod = targetMethod.IsNewSlot ? targetMethod : MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethod); _dependencies.Add(_factory.VirtualMethodUse(slotDefiningMethod), reason); } } private void ImportLdFtn(int token, ILOpcode opCode) { // Is this a verifiable delegate creation? If so, we will handle it when we reach the newobj if (_ilBytes[_currentOffset] == (byte)ILOpcode.newobj) { int delegateToken = ReadILTokenAt(_currentOffset + 1); var delegateType = ((MethodDesc)_methodIL.GetObject(delegateToken)).OwningType; if (delegateType.IsDelegate) return; } ImportCall(opCode, token); } private void ImportBranch(ILOpcode opcode, BasicBlock target, BasicBlock fallthrough) { ImportFallthrough(target); if (fallthrough != null) ImportFallthrough(fallthrough); } private void ImportSwitchJump(int jmpBase, int[] jmpDelta, BasicBlock fallthrough) { for (int i = 0; i < jmpDelta.Length; i++) { BasicBlock target = _basicBlocks[jmpBase + jmpDelta[i]]; ImportFallthrough(target); } if (fallthrough != null) ImportFallthrough(fallthrough); } private void ImportUnbox(int token, ILOpcode opCode) { TypeDesc type = (TypeDesc)_methodIL.GetObject(token); if (!type.IsValueType) { if (opCode == ILOpcode.unbox_any) { // When applied to a reference type, unbox_any has the same effect as castclass. 
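// (e.g. "(T)obj" in generic code emits "unbox.any !!T"; when T is instantiated over a reference type this degenerates to the castclass path taken here.)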
ImportCasting(ILOpcode.castclass, token); } return; } if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), "Unbox"); } else { _dependencies.Add(_factory.NecessaryTypeSymbol(type), "Unbox"); } ReadyToRunHelper helper; if (opCode == ILOpcode.unbox) { helper = ReadyToRunHelper.Unbox; } else { Debug.Assert(opCode == ILOpcode.unbox_any); helper = ReadyToRunHelper.Unbox_Nullable; } _dependencies.Add(GetHelperEntrypoint(helper), "Unbox"); } private void ImportRefAnyVal(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRefAny), "refanyval"); } private void ImportMkRefAny(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.TypeHandleToRuntimeType), "mkrefany"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.TypeHandleToRuntimeTypeHandle), "mkrefany"); } private void ImportLdToken(int token) { object obj = _methodIL.GetObject(token); if (obj is TypeDesc) { // If this is a ldtoken Type / Type.GetTypeFromHandle sequence, we need one more helper. // We might also be able to optimize this a little if this is a ldtoken/GetTypeFromHandle/Equals sequence. bool isTypeEquals = false; BasicBlock nextBasicBlock = _basicBlocks[_currentOffset]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset] == ILOpcode.call) { int methodToken = ReadILTokenAt(_currentOffset + 1); var method = (MethodDesc)_methodIL.GetObject(methodToken); if (IsTypeGetTypeFromHandle(method)) { // Codegen will swap this one for GetRuntimeTypeHandle when optimizing _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeType), "ldtoken"); // Is the next instruction a call to Type::Equals? nextBasicBlock = _basicBlocks[_currentOffset + 5]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset + 5] == ILOpcode.call) { methodToken = ReadILTokenAt(_currentOffset + 6); method = (MethodDesc)_methodIL.GetObject(methodToken); isTypeEquals = IsTypeEquals(method); } } } } } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeTypeHandle), "ldtoken"); var type = (TypeDesc)obj; ISymbolNode reference; if (type.IsRuntimeDeterminedSubtype) { reference = GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type); } else { reference = _compilation.ComputeConstantLookup( isTypeEquals ? ReadyToRunHelperId.NecessaryTypeHandle : _compilation.GetLdTokenHelperForType(type), type); } _dependencies.Add(reference, "ldtoken"); } else if (obj is MethodDesc) { var method = (MethodDesc)obj; if (method.IsRuntimeDeterminedExactMethod) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.MethodHandle, method), "ldtoken"); } else { _dependencies.Add(_factory.RuntimeMethodHandle(method), "ldtoken"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeMethodHandle), "ldtoken"); } else { Debug.Assert(obj is FieldDesc); // First check if this is a ldtoken Field followed by InitializeArray or CreateSpan. BasicBlock nextBasicBlock = _basicBlocks[_currentOffset]; if (nextBasicBlock == null) { if ((ILOpcode)_ilBytes[_currentOffset] == ILOpcode.call) { int methodToken = ReadILTokenAt(_currentOffset + 1); var method = (MethodDesc)_methodIL.GetObject(methodToken); if (IsRuntimeHelpersInitializeArrayOrCreateSpan(method)) { // Codegen expands this and doesn't do the normal ldtoken. 
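// (The "ldtoken <field>; call InitializeArray/CreateSpan" pattern is what C# array initializers such as "new int[] { 1, 2, 3 }" compile to.)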
return; } } } var field = (FieldDesc)obj; if (field.OwningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.FieldHandle, field), "ldtoken"); } else { _dependencies.Add(_factory.RuntimeFieldHandle(field), "ldtoken"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.GetRuntimeFieldHandle), "ldtoken"); } } private void ImportRefAnyType() { // TODO } private void ImportArgList() { } private void ImportConstrainedPrefix(int token) { _constrained = (TypeDesc)_methodIL.GetObject(token); } private void ImportReadOnlyPrefix() { _isReadOnly = true; } private void ImportFieldAccess(int token, bool isStatic, string reason) { var field = (FieldDesc)_methodIL.GetObject(token); _compilation.NodeFactory.MetadataManager.GetDependenciesDueToAccess(ref _dependencies, _compilation.NodeFactory, _canonMethodIL, field); // Covers both ldsfld/ldsflda and ldfld/ldflda with a static field if (isStatic || field.IsStatic) { // ldsfld/ldsflda with an instance field is invalid IL if (isStatic && !field.IsStatic) ThrowHelper.ThrowInvalidProgramException(); // References to literal fields from IL body should never resolve. // The CLR would throw a MissingFieldException while jitting and so should we. if (field.IsLiteral) ThrowHelper.ThrowMissingFieldException(field.OwningType, field.Name); if (field.HasRva) { // We don't care about field RVA data for the usual cases, but if this is one of the // magic fields the compiler synthesized, the data blob might bring more dependencies // and we need to scan those. _dependencies.Add(_compilation.GetFieldRvaData(field), reason); // TODO: lazy cctor dependency return; } ReadyToRunHelperId helperId; if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else if (field.HasGCStaticBase) { helperId = ReadyToRunHelperId.GetGCStaticBase; } else { helperId = ReadyToRunHelperId.GetNonGCStaticBase; } TypeDesc owningType = field.OwningType; if (owningType.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(helperId, owningType), reason); } else { _dependencies.Add(_factory.ReadyToRunHelper(helperId, owningType), reason); } } } private void ImportLoadField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "ldsfld" : "ldfld"); } private void ImportAddressOfField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "ldsflda" : "ldflda"); } private void ImportStoreField(int token, bool isStatic) { ImportFieldAccess(token, isStatic, isStatic ? "stsfld" : "stfld"); } private void ImportLoadString(int token) { // If we care, this can include allocating the frozen string node. _dependencies.Add(_factory.SerializedStringObject(""), "ldstr"); } private void ImportBox(int token) { AddBoxingDependencies((TypeDesc)_methodIL.GetObject(token), "Box"); } private void AddBoxingDependencies(TypeDesc type, string reason) { Debug.Assert(!type.IsCanonicalSubtype(CanonicalFormKind.Any)); // Generic code will have BOX instructions when referring to T - the instruction is a no-op // if the substitution wasn't a value type.
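// (e.g. "object o = t;" in a method M<T>(T t) compiles to "box !!T"; when T ends up instantiated over a reference type the box is a no-op at runtime.)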
if (!type.IsValueType) return; if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), reason); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(type), reason); } if (type.IsNullable) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Box_Nullable), reason); } else { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Box), reason); } } private void ImportLeave(BasicBlock target) { ImportFallthrough(target); } private void ImportNewArray(int token) { var type = ((TypeDesc)_methodIL.GetObject(token)).MakeArrayType(); if (type.IsRuntimeDeterminedSubtype) { _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, type), "newarr"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.NewArray), "newarr"); } else { _dependencies.Add(_factory.ConstructedTypeSymbol(type), "newarr"); } } private void ImportLoadElement(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelem"); } private void ImportLoadElement(TypeDesc elementType) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelem"); } private void ImportStoreElement(int token) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "stelem"); } private void ImportStoreElement(TypeDesc elementType) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "stelem"); } private void ImportAddressOfElement(int token) { TypeDesc elementType = (TypeDesc)_methodIL.GetObject(token); if (elementType.IsGCPointer && !_isReadOnly) { if (elementType.IsRuntimeDeterminedSubtype) _dependencies.Add(GetGenericLookupHelper(ReadyToRunHelperId.TypeHandle, elementType), "ldelema"); else _dependencies.Add(_factory.NecessaryTypeSymbol(elementType), "ldelema"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.RngChkFail), "ldelema"); } private void ImportBinaryOperation(ILOpcode opcode) { switch (opcode) { case ILOpcode.add_ovf: case ILOpcode.add_ovf_un: case ILOpcode.sub_ovf: case ILOpcode.sub_ovf_un: _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Overflow), "_ovf"); break; case ILOpcode.mul_ovf: case ILOpcode.mul_ovf_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LMulOfv), "_lmulovf"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULMulOvf), "_ulmulovf"); } _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Overflow), "_ovf"); break; case ILOpcode.div: case ILOpcode.div_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULDiv), "_uldiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LDiv), "_ldiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.UDiv), "_udiv"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Div), "_div"); } else if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM64) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ThrowDivZero), "_divbyzero"); } break; case ILOpcode.rem: case ILOpcode.rem_un: if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ULMod), "_ulmod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.LMod), "_lmod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.UMod), "_umod"); _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.Mod), "_mod"); } else if
(_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM64) { _dependencies.Add(GetHelperEntrypoint(ReadyToRunHelper.ThrowDivZero), "_divbyzero"); } break; } } private void ImportFallthrough(BasicBlock next) { MarkBasicBlock(next); } private int ReadILTokenAt(int ilOffset) { return (int)(_ilBytes[ilOffset] + (_ilBytes[ilOffset + 1] << 8) + (_ilBytes[ilOffset + 2] << 16) + (_ilBytes[ilOffset + 3] << 24)); } private void ReportInvalidBranchTarget(int targetOffset) { ThrowHelper.ThrowInvalidProgramException(); } private void ReportFallthroughAtEndOfMethod() { ThrowHelper.ThrowInvalidProgramException(); } private void ReportMethodEndInsideInstruction() { ThrowHelper.ThrowInvalidProgramException(); } private void ReportInvalidInstruction(ILOpcode opcode) { ThrowHelper.ThrowInvalidProgramException(); } private bool IsRuntimeHelpersInitializeArrayOrCreateSpan(MethodDesc method) { if (method.IsIntrinsic) { string name = method.Name; if (name == "InitializeArray" || name == "CreateSpan") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "RuntimeHelpers" && owningType.Namespace == "System.Runtime.CompilerServices"; } } } return false; } private bool IsTypeGetTypeFromHandle(MethodDesc method) { if (method.IsIntrinsic && method.Name == "GetTypeFromHandle") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Type" && owningType.Namespace == "System"; } } return false; } private bool IsTypeEquals(MethodDesc method) { if (method.IsIntrinsic && method.Name == "op_Equality") { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Type" && owningType.Namespace == "System"; } } return false; } private bool IsActivatorDefaultConstructorOf(MethodDesc method) { if (method.IsIntrinsic && method.Name == "DefaultConstructorOf" && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Activator" && owningType.Namespace == "System"; } } return false; } private bool IsActivatorAllocatorOf(MethodDesc method) { if (method.IsIntrinsic && method.Name == "AllocatorOf" && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return owningType.Name == "Activator" && owningType.Namespace == "System"; } } return false; } private bool IsEETypePtrOf(MethodDesc method) { if (method.IsIntrinsic && (method.Name == "EETypePtrOf" || method.Name == "MethodTableOf") && method.Instantiation.Length == 1) { MetadataType owningType = method.OwningType as MetadataType; if (owningType != null) { return (owningType.Name == "EETypePtr" && owningType.Namespace == "System") || (owningType.Name == "Object" && owningType.Namespace == "System"); } } return false; } private TypeDesc GetWellKnownType(WellKnownType wellKnownType) { return _compilation.TypeSystemContext.GetWellKnownType(wellKnownType); } private void ImportNop() { } private void ImportBreak() { } private void ImportLoadVar(int index, bool argument) { } private void ImportStoreVar(int index, bool argument) { } private void ImportAddressOfVar(int index, bool argument) { } private void ImportDup() { } private void ImportPop() { } private void ImportCalli(int token) { } private void ImportLoadNull() { } private void ImportReturn() { } private void ImportLoadInt(long value, StackValueKind kind) { } private void 
ImportLoadFloat(double value) { } private void ImportLoadIndirect(int token) { } private void ImportLoadIndirect(TypeDesc type) { } private void ImportStoreIndirect(int token) { } private void ImportStoreIndirect(TypeDesc type) { } private void ImportShiftOperation(ILOpcode opcode) { } private void ImportCompareOperation(ILOpcode opcode) { } private void ImportConvert(WellKnownType wellKnownType, bool checkOverflow, bool unsigned) { } private void ImportUnaryOperation(ILOpcode opCode) { } private void ImportCpOpj(int token) { } private void ImportCkFinite() { } private void ImportLocalAlloc() { } private void ImportEndFilter() { } private void ImportCpBlk() { } private void ImportInitBlk() { } private void ImportRethrow() { } private void ImportSizeOf(int token) { } private void ImportUnalignedPrefix(byte alignment) { } private void ImportVolatilePrefix() { } private void ImportTailPrefix() { } private void ImportNoPrefix(byte mask) { } private void ImportThrow() { } private void ImportInitObj(int token) { } private void ImportLoadLength() { } private void ImportEndFinally() { } } }
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
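To make the PR's feature concrete, here is a minimal hypothetical C# sketch (all type and member names are invented for illustration and do not come from this PR) of a static virtual interface member and the constrained call it implies:

interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
{
    // Static virtual (here: static abstract) interface members - the feature being wired up.
    static abstract TSelf Zero { get; }
    static abstract TSelf Add(TSelf left, TSelf right);
}

struct Meters : IAdditive<Meters>
{
    public double Value;
    public static Meters Zero => default;
    public static Meters Add(Meters l, Meters r) => new Meters { Value = l.Value + r.Value };
}

static class Summer
{
    // For T.Zero / T.Add below, Roslyn emits a "constrained. !!T" prefix followed by "call";
    // resolving that constrained call (exactly when possible, or through a runtime lookup in
    // shared generic code) is what ResolveConstraintMethodApprox and the reused
    // ConstrainedMethodUseLookupResult cover.
    public static T Sum<T>(T[] items) where T : IAdditive<T>
    {
        T acc = T.Zero;
        foreach (T item in items)
            acc = T.Add(acc, item);
        return acc;
    }
}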
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Runtime.InteropServices; using Internal.IL; using Internal.TypeSystem; using Internal.ReadyToRunConstants; using ILCompiler; using ILCompiler.DependencyAnalysis; using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList; #if SUPPORT_JIT using MethodCodeNode = Internal.Runtime.JitSupport.JitMethodCodeNode; using RyuJitCompilation = ILCompiler.Compilation; #endif namespace Internal.JitInterface { unsafe partial class CorInfoImpl { private const CORINFO_RUNTIME_ABI TargetABI = CORINFO_RUNTIME_ABI.CORINFO_CORERT_ABI; private uint OffsetOfDelegateFirstTarget => (uint)(4 * PointerSize); // Delegate::m_functionPointer private int SizeOfReversePInvokeTransitionFrame => 2 * PointerSize; private RyuJitCompilation _compilation; private MethodDebugInformation _debugInfo; private MethodCodeNode _methodCodeNode; private DebugLocInfo[] _debugLocInfos; private DebugVarInfo[] _debugVarInfos; private readonly UnboxingMethodDescFactory _unboxingThunkFactory = new UnboxingMethodDescFactory(); private bool _isFallbackBodyCompilation; private DependencyList _additionalDependencies; public CorInfoImpl(RyuJitCompilation compilation) : this() { _compilation = compilation; } private MethodDesc getUnboxingThunk(MethodDesc method) { return _unboxingThunkFactory.GetUnboxingMethod(method); } public void CompileMethod(MethodCodeNode methodCodeNodeNeedingCode, MethodIL methodIL = null) { _methodCodeNode = methodCodeNodeNeedingCode; _isFallbackBodyCompilation = methodIL != null; if (methodIL == null) methodIL = _compilation.GetMethodIL(MethodBeingCompiled); try { CompileMethodInternal(methodCodeNodeNeedingCode, methodIL); } finally { #if DEBUG // RyuJIT makes assumptions around the value of type symbols - in particular, it assumes // that type handles and type symbols have a 1:1 relationship. We therefore need to // make sure RyuJIT never sees a constructed and unconstructed type symbol for the // same type. This check makes sure we didn't accidentally hand out a necessary type symbol // that the compilation class didn't agree to handing out. // https://github.com/dotnet/runtimelab/issues/1128 for (int i = 0; i < _codeRelocs.Count; i++) { Debug.Assert(_codeRelocs[i].Target.GetType() != typeof(EETypeNode) || _compilation.NecessaryTypeSymbolIfPossible(((EETypeNode)_codeRelocs[i].Target).Type) == _codeRelocs[i].Target); } #endif CompileMethodCleanup(); } } private enum CFI_OPCODE { CFI_ADJUST_CFA_OFFSET, // Offset is adjusted relative to the current one. CFI_DEF_CFA_REGISTER, // New register is used to compute CFA CFI_REL_OFFSET, // Register is saved at offset from the current CFA CFI_DEF_CFA // Take address from register and add offset to it. } // Get the CFI data in the same shape as clang/LLVM generated one. 
This improves the compatibility with libunwind and other unwind solutions // - Combine in one single block for the whole prolog instead of one CFI block per assembler instruction // - Store CFA definition first // - Store all used registers in ascending order private byte[] CompressARM64CFI(byte[] blobData) { if (blobData == null || blobData.Length == 0) { return blobData; } Debug.Assert(blobData.Length % 8 == 0); short spReg = -1; int codeOffset = 0; short cfaRegister = spReg; int cfaOffset = 0; int spOffset = 0; int[] registerOffset = new int[96]; for (int i = 0; i < registerOffset.Length; i++) { registerOffset[i] = int.MinValue; } int offset = 0; while (offset < blobData.Length) { codeOffset = Math.Max(codeOffset, blobData[offset++]); CFI_OPCODE opcode = (CFI_OPCODE)blobData[offset++]; short dwarfReg = BitConverter.ToInt16(blobData, offset); offset += sizeof(short); int cfiOffset = BitConverter.ToInt32(blobData, offset); offset += sizeof(int); switch (opcode) { case CFI_OPCODE.CFI_DEF_CFA_REGISTER: cfaRegister = dwarfReg; if (spOffset != 0) { for (int i = 0; i < registerOffset.Length; i++) { if (registerOffset[i] != int.MinValue) { registerOffset[i] -= spOffset; } } cfaOffset += spOffset; spOffset = 0; } break; case CFI_OPCODE.CFI_REL_OFFSET: Debug.Assert(cfaRegister == spReg); registerOffset[dwarfReg] = cfiOffset; break; case CFI_OPCODE.CFI_ADJUST_CFA_OFFSET: if (cfaRegister != spReg) { cfaOffset += cfiOffset; } else { spOffset += cfiOffset; for (int i = 0; i < registerOffset.Length; i++) { if (registerOffset[i] != int.MinValue) { registerOffset[i] += cfiOffset; } } } break; } } using (MemoryStream cfiStream = new MemoryStream()) { int storeOffset = 0; using (BinaryWriter cfiWriter = new BinaryWriter(cfiStream)) { if (cfaRegister != -1) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write(cfaOffset != 0 ? (byte)CFI_OPCODE.CFI_DEF_CFA : (byte)CFI_OPCODE.CFI_DEF_CFA_REGISTER); cfiWriter.Write(cfaRegister); cfiWriter.Write(cfaOffset); storeOffset = cfaOffset; } else { if (cfaOffset != 0) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_ADJUST_CFA_OFFSET); cfiWriter.Write((short)-1); cfiWriter.Write(cfaOffset); } if (spOffset != 0) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_DEF_CFA); cfiWriter.Write((short)31); cfiWriter.Write(spOffset); } } for (int i = registerOffset.Length - 1; i >= 0; i--) { if (registerOffset[i] != int.MinValue) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_REL_OFFSET); cfiWriter.Write((short)i); cfiWriter.Write(registerOffset[i] + storeOffset); } } } return cfiStream.ToArray(); } } private CORINFO_RUNTIME_LOOKUP_KIND GetLookupKindFromContextSource(GenericContextSource contextSource) { switch (contextSource) { case GenericContextSource.MethodParameter: return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_METHODPARAM; case GenericContextSource.TypeParameter: return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_CLASSPARAM; default: Debug.Assert(contextSource == GenericContextSource.ThisObject); return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_THISOBJ; } } private void ComputeLookup(ref CORINFO_RESOLVED_TOKEN pResolvedToken, object entity, ReadyToRunHelperId helperId, ref CORINFO_LOOKUP lookup) { if (_compilation.NeedsRuntimeLookup(helperId, entity)) { lookup.lookupKind.needsRuntimeLookup = true; lookup.runtimeLookup.signature = null; // Do not bother computing the runtime lookup if we are inlining. The JIT is going // to abort the inlining attempt anyway. 
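// (An inline body is recognizable here because the resolved token's context differs from the context of the method being compiled, which is exactly what the check below tests.)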
if (pResolvedToken.tokenContext != contextFromMethodBeingCompiled()) { lookup.lookupKind.runtimeLookupKind = CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_NOT_SUPPORTED; return; } MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); GenericDictionaryLookup genericLookup = _compilation.ComputeGenericLookup(contextMethod, helperId, entity); if (genericLookup.UseHelper) { lookup.runtimeLookup.indirections = CORINFO.USEHELPER; lookup.lookupKind.runtimeLookupFlags = (ushort)genericLookup.HelperId; lookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(genericLookup.HelperObject); } else { if (genericLookup.ContextSource == GenericContextSource.MethodParameter) { lookup.runtimeLookup.helper = CorInfoHelpFunc.CORINFO_HELP_RUNTIMEHANDLE_METHOD; } else { lookup.runtimeLookup.helper = CorInfoHelpFunc.CORINFO_HELP_RUNTIMEHANDLE_CLASS; } lookup.runtimeLookup.indirections = (ushort)(genericLookup.NumberOfIndirections + (genericLookup.IndirectLastOffset ? 1 : 0)); lookup.runtimeLookup.offset0 = (IntPtr)genericLookup[0]; if (genericLookup.NumberOfIndirections > 1) { lookup.runtimeLookup.offset1 = (IntPtr)genericLookup[1]; if (genericLookup.IndirectLastOffset) lookup.runtimeLookup.offset2 = IntPtr.Zero; } else if (genericLookup.IndirectLastOffset) { lookup.runtimeLookup.offset1 = IntPtr.Zero; } lookup.runtimeLookup.sizeOffset = CORINFO.CORINFO_NO_SIZE_CHECK; lookup.runtimeLookup.testForFixup = false; // TODO: this will be needed in true multifile lookup.runtimeLookup.testForNull = false; lookup.runtimeLookup.indirectFirstOffset = false; lookup.runtimeLookup.indirectSecondOffset = false; lookup.lookupKind.runtimeLookupFlags = 0; lookup.lookupKind.runtimeLookupArgs = null; } lookup.lookupKind.runtimeLookupKind = GetLookupKindFromContextSource(genericLookup.ContextSource); } else { lookup.lookupKind.needsRuntimeLookup = false; ISymbolNode constLookup = _compilation.ComputeConstantLookup(helperId, entity); lookup.constLookup = CreateConstLookupToSymbol(constLookup); } } private bool getReadyToRunHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, ref CORINFO_LOOKUP_KIND pGenericLookupKind, CorInfoHelpFunc id, ref CORINFO_CONST_LOOKUP pLookup) { switch (id) { case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_NEW: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_NEWARR_1: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_ISINSTANCEOF: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_CHKCAST: return false; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_STATIC_BASE: { var type = HandleToObject(pResolvedToken.hClass); if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) return false; pLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ReadyToRunHelper(ReadyToRunHelperId.GetNonGCStaticBase, type)); } break; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE: { // Token == 0 means "initialize this class". We only expect RyuJIT to call it for this case. 
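// (This arises in shared generic code: triggering the owning type's cctor requires a runtime lookup of the non-GC static base for the concrete instantiation, built from the shared runtime-determined form below.)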
Debug.Assert(pResolvedToken.token == 0 && pResolvedToken.tokenScope == null); Debug.Assert(pGenericLookupKind.needsRuntimeLookup); DefType typeToInitialize = (DefType)MethodBeingCompiled.OwningType; Debug.Assert(typeToInitialize.IsCanonicalSubtype(CanonicalFormKind.Any)); DefType helperArg = typeToInitialize.ConvertToSharedRuntimeDeterminedForm(); ISymbolNode helper = GetGenericLookupHelper(pGenericLookupKind.runtimeLookupKind, ReadyToRunHelperId.GetNonGCStaticBase, helperArg); pLookup = CreateConstLookupToSymbol(helper); } break; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_HANDLE: { Debug.Assert(pGenericLookupKind.needsRuntimeLookup); ReadyToRunHelperId helperId = (ReadyToRunHelperId)pGenericLookupKind.runtimeLookupFlags; object helperArg = HandleToObject((IntPtr)pGenericLookupKind.runtimeLookupArgs); ISymbolNode helper = GetGenericLookupHelper(pGenericLookupKind.runtimeLookupKind, helperId, helperArg); pLookup = CreateConstLookupToSymbol(helper); } break; default: throw new NotImplementedException("ReadyToRun: " + id.ToString()); } return true; } private void getReadyToRunDelegateCtorHelper(ref CORINFO_RESOLVED_TOKEN pTargetMethod, CORINFO_CLASS_STRUCT_* delegateType, ref CORINFO_LOOKUP pLookup) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. fixed (CORINFO_LOOKUP* tmp = &pLookup) MemoryHelper.FillMemory((byte*)tmp, 0xcc, sizeof(CORINFO_LOOKUP)); #endif MethodDesc targetMethod = HandleToObject(pTargetMethod.hMethod); TypeDesc delegateTypeDesc = HandleToObject(delegateType); if (targetMethod.IsSharedByGenericInstantiations) { // If the method is not exact, fetch it as a runtime determined method. targetMethod = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pTargetMethod); } bool isLdvirtftn = pTargetMethod.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldvirtftn; DelegateCreationInfo delegateInfo = _compilation.GetDelegateCtor(delegateTypeDesc, targetMethod, isLdvirtftn); if (delegateInfo.NeedsRuntimeLookup) { pLookup.lookupKind.needsRuntimeLookup = true; MethodDesc contextMethod = methodFromContext(pTargetMethod.tokenContext); // We should not be inlining these. RyuJIT should have aborted inlining already. 
Debug.Assert(contextMethod == MethodBeingCompiled); pLookup.lookupKind.runtimeLookupKind = GetGenericRuntimeLookupKind(contextMethod); pLookup.lookupKind.runtimeLookupFlags = (ushort)ReadyToRunHelperId.DelegateCtor; pLookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(delegateInfo); } else { pLookup.lookupKind.needsRuntimeLookup = false; pLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ReadyToRunHelper(ReadyToRunHelperId.DelegateCtor, delegateInfo)); } } private ISymbolNode GetHelperFtnUncached(CorInfoHelpFunc ftnNum) { ReadyToRunHelper id; switch (ftnNum) { case CorInfoHelpFunc.CORINFO_HELP_THROW: id = ReadyToRunHelper.Throw; break; case CorInfoHelpFunc.CORINFO_HELP_RETHROW: id = ReadyToRunHelper.Rethrow; break; case CorInfoHelpFunc.CORINFO_HELP_USER_BREAKPOINT: id = ReadyToRunHelper.DebugBreak; break; case CorInfoHelpFunc.CORINFO_HELP_OVERFLOW: id = ReadyToRunHelper.Overflow; break; case CorInfoHelpFunc.CORINFO_HELP_RNGCHKFAIL: id = ReadyToRunHelper.RngChkFail; break; case CorInfoHelpFunc.CORINFO_HELP_FAIL_FAST: id = ReadyToRunHelper.FailFast; break; case CorInfoHelpFunc.CORINFO_HELP_THROWNULLREF: id = ReadyToRunHelper.ThrowNullRef; break; case CorInfoHelpFunc.CORINFO_HELP_THROWDIVZERO: id = ReadyToRunHelper.ThrowDivZero; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION: id = ReadyToRunHelper.ThrowArgumentOutOfRange; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_ARGUMENTEXCEPTION: id = ReadyToRunHelper.ThrowArgument; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_NOT_IMPLEMENTED: id = ReadyToRunHelper.ThrowNotImplemented; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED: id = ReadyToRunHelper.ThrowPlatformNotSupported; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF: id = ReadyToRunHelper.WriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF: id = ReadyToRunHelper.CheckedWriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_BYREF: id = ReadyToRunHelper.ByRefWriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF_EAX: id = ReadyToRunHelper.WriteBarrier_EAX; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF_ECX: id = ReadyToRunHelper.WriteBarrier_ECX; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF_EAX: id = ReadyToRunHelper.CheckedWriteBarrier_EAX; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF_ECX: id = ReadyToRunHelper.CheckedWriteBarrier_ECX; break; case CorInfoHelpFunc.CORINFO_HELP_ARRADDR_ST: id = ReadyToRunHelper.Stelem_Ref; break; case CorInfoHelpFunc.CORINFO_HELP_LDELEMA_REF: id = ReadyToRunHelper.Ldelema_Ref; break; case CorInfoHelpFunc.CORINFO_HELP_MEMSET: id = ReadyToRunHelper.MemSet; break; case CorInfoHelpFunc.CORINFO_HELP_MEMCPY: id = ReadyToRunHelper.MemCpy; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE: id = ReadyToRunHelper.GetRuntimeType; break; case CorInfoHelpFunc.CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD: id = ReadyToRunHelper.GetRuntimeMethodHandle; break; case CorInfoHelpFunc.CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD: id = ReadyToRunHelper.GetRuntimeFieldHandle; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE: id = ReadyToRunHelper.GetRuntimeTypeHandle; break; case CorInfoHelpFunc.CORINFO_HELP_ARE_TYPES_EQUIVALENT: id = ReadyToRunHelper.AreTypesEquivalent; break; case CorInfoHelpFunc.CORINFO_HELP_BOX: id = ReadyToRunHelper.Box; break; case CorInfoHelpFunc.CORINFO_HELP_BOX_NULLABLE: id = ReadyToRunHelper.Box_Nullable; break; case CorInfoHelpFunc.CORINFO_HELP_UNBOX: id = 
ReadyToRunHelper.Unbox; break; case CorInfoHelpFunc.CORINFO_HELP_UNBOX_NULLABLE: id = ReadyToRunHelper.Unbox_Nullable; break; case CorInfoHelpFunc.CORINFO_HELP_NEW_MDARR: id = ReadyToRunHelper.NewMultiDimArr; break; case CorInfoHelpFunc.CORINFO_HELP_NEWFAST: id = ReadyToRunHelper.NewObject; break; case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST: return _compilation.NodeFactory.ExternSymbol("RhpNewFast"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_FINALIZE: return _compilation.NodeFactory.ExternSymbol("RhpNewFinalizable"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8: return _compilation.NodeFactory.ExternSymbol("RhpNewFastAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_FINALIZE: return _compilation.NodeFactory.ExternSymbol("RhpNewFinalizableAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_VC: return _compilation.NodeFactory.ExternSymbol("RhpNewFastMisalign"); case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_DIRECT: id = ReadyToRunHelper.NewArray; break; case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_ALIGN8: return _compilation.NodeFactory.ExternSymbol("RhpNewArrayAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_VC: return _compilation.NodeFactory.ExternSymbol("RhpNewArray"); case CorInfoHelpFunc.CORINFO_HELP_STACK_PROBE: return _compilation.NodeFactory.ExternSymbol("RhpStackProbe"); case CorInfoHelpFunc.CORINFO_HELP_POLL_GC: return _compilation.NodeFactory.ExternSymbol("RhpGcPoll"); case CorInfoHelpFunc.CORINFO_HELP_LMUL: id = ReadyToRunHelper.LMul; break; case CorInfoHelpFunc.CORINFO_HELP_LMUL_OVF: id = ReadyToRunHelper.LMulOfv; break; case CorInfoHelpFunc.CORINFO_HELP_ULMUL_OVF: id = ReadyToRunHelper.ULMulOvf; break; case CorInfoHelpFunc.CORINFO_HELP_LDIV: id = ReadyToRunHelper.LDiv; break; case CorInfoHelpFunc.CORINFO_HELP_LMOD: id = ReadyToRunHelper.LMod; break; case CorInfoHelpFunc.CORINFO_HELP_ULDIV: id = ReadyToRunHelper.ULDiv; break; case CorInfoHelpFunc.CORINFO_HELP_ULMOD: id = ReadyToRunHelper.ULMod; break; case CorInfoHelpFunc.CORINFO_HELP_LLSH: id = ReadyToRunHelper.LLsh; break; case CorInfoHelpFunc.CORINFO_HELP_LRSH: id = ReadyToRunHelper.LRsh; break; case CorInfoHelpFunc.CORINFO_HELP_LRSZ: id = ReadyToRunHelper.LRsz; break; case CorInfoHelpFunc.CORINFO_HELP_LNG2DBL: id = ReadyToRunHelper.Lng2Dbl; break; case CorInfoHelpFunc.CORINFO_HELP_ULNG2DBL: id = ReadyToRunHelper.ULng2Dbl; break; case CorInfoHelpFunc.CORINFO_HELP_DIV: id = ReadyToRunHelper.Div; break; case CorInfoHelpFunc.CORINFO_HELP_MOD: id = ReadyToRunHelper.Mod; break; case CorInfoHelpFunc.CORINFO_HELP_UDIV: id = ReadyToRunHelper.UDiv; break; case CorInfoHelpFunc.CORINFO_HELP_UMOD: id = ReadyToRunHelper.UMod; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2INT: id = ReadyToRunHelper.Dbl2Int; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2INT_OVF: id = ReadyToRunHelper.Dbl2IntOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2LNG: id = ReadyToRunHelper.Dbl2Lng; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2LNG_OVF: id = ReadyToRunHelper.Dbl2LngOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2UINT: id = ReadyToRunHelper.Dbl2UInt; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2UINT_OVF: id = ReadyToRunHelper.Dbl2UIntOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2ULNG: id = ReadyToRunHelper.Dbl2ULng; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2ULNG_OVF: id = ReadyToRunHelper.Dbl2ULngOvf; break; case CorInfoHelpFunc.CORINFO_HELP_FLTREM: id = ReadyToRunHelper.FltRem; break; case CorInfoHelpFunc.CORINFO_HELP_DBLREM: id = ReadyToRunHelper.DblRem; break; case CorInfoHelpFunc.CORINFO_HELP_FLTROUND: id = 
ReadyToRunHelper.FltRound; break; case CorInfoHelpFunc.CORINFO_HELP_DBLROUND: id = ReadyToRunHelper.DblRound; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_PINVOKE_BEGIN: id = ReadyToRunHelper.PInvokeBegin; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_PINVOKE_END: id = ReadyToRunHelper.PInvokeEnd; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER: id = ReadyToRunHelper.ReversePInvokeEnter; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT: id = ReadyToRunHelper.ReversePInvokeExit; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTANY: id = ReadyToRunHelper.CheckCastAny; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY: id = ReadyToRunHelper.CheckInstanceAny; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS: case CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS_SPECIAL: // TODO: separate helper for the _SPECIAL case id = ReadyToRunHelper.CheckCastClass; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS: id = ReadyToRunHelper.CheckInstanceClass; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTARRAY: id = ReadyToRunHelper.CheckCastArray; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY: id = ReadyToRunHelper.CheckInstanceArray; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTINTERFACE: id = ReadyToRunHelper.CheckCastInterface; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE: id = ReadyToRunHelper.CheckInstanceInterface; break; case CorInfoHelpFunc.CORINFO_HELP_MON_ENTER: id = ReadyToRunHelper.MonitorEnter; break; case CorInfoHelpFunc.CORINFO_HELP_MON_EXIT: id = ReadyToRunHelper.MonitorExit; break; case CorInfoHelpFunc.CORINFO_HELP_MON_ENTER_STATIC: id = ReadyToRunHelper.MonitorEnterStatic; break; case CorInfoHelpFunc.CORINFO_HELP_MON_EXIT_STATIC: id = ReadyToRunHelper.MonitorExitStatic; break; case CorInfoHelpFunc.CORINFO_HELP_GVMLOOKUP_FOR_SLOT: id = ReadyToRunHelper.GVMLookupForSlot; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL: id = ReadyToRunHelper.TypeHandleToRuntimeType; break; case CorInfoHelpFunc.CORINFO_HELP_GETREFANY: id = ReadyToRunHelper.GetRefAny; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL: id = ReadyToRunHelper.TypeHandleToRuntimeTypeHandle; break; case CorInfoHelpFunc.CORINFO_HELP_GETCURRENTMANAGEDTHREADID: id = ReadyToRunHelper.GetCurrentManagedThreadId; break; case CorInfoHelpFunc.CORINFO_HELP_VALIDATE_INDIRECT_CALL: return _compilation.NodeFactory.ExternIndirectSymbol("__guard_check_icall_fptr"); case CorInfoHelpFunc.CORINFO_HELP_DISPATCH_INDIRECT_CALL: return _compilation.NodeFactory.ExternIndirectSymbol("__guard_dispatch_icall_fptr"); default: throw new NotImplementedException(ftnNum.ToString()); } string mangledName; MethodDesc methodDesc; JitHelper.GetEntryPoint(_compilation.TypeSystemContext, id, out mangledName, out methodDesc); Debug.Assert(mangledName != null || methodDesc != null); ISymbolNode entryPoint; if (mangledName != null) entryPoint = _compilation.NodeFactory.ExternSymbol(mangledName); else entryPoint = _compilation.NodeFactory.MethodEntrypoint(methodDesc); return entryPoint; } private void getFunctionEntryPoint(CORINFO_METHOD_STRUCT_* ftn, ref CORINFO_CONST_LOOKUP pResult, CORINFO_ACCESS_FLAGS accessFlags) { MethodDesc method = HandleToObject(ftn); // TODO: Implement MapMethodDeclToMethodImpl from CoreCLR if (method.IsVirtual && method.OwningType is MetadataType mdType && mdType.VirtualMethodImplsForType.Length > 0) { throw new NotImplementedException("getFunctionEntryPoint"); } pResult = 
CreateConstLookupToSymbol(_compilation.NodeFactory.MethodEntrypoint(method)); } private bool canTailCall(CORINFO_METHOD_STRUCT_* callerHnd, CORINFO_METHOD_STRUCT_* declaredCalleeHnd, CORINFO_METHOD_STRUCT_* exactCalleeHnd, bool fIsTailPrefix) { // Assume we can tail call unless proved otherwise bool result = true; if (!fIsTailPrefix) { MethodDesc caller = HandleToObject(callerHnd); if (caller.IsNoInlining) { // Do not tailcall from methods that are marked as noinline (people often use no-inline // to mean "I want to always see this method in stacktrace") result = false; } } return result; } private InfoAccessType constructStringLiteral(CORINFO_MODULE_STRUCT_* module, mdToken metaTok, ref void* ppValue) { MethodIL methodIL = (MethodIL)HandleToObject((IntPtr)module); ISymbolNode stringObject; if (metaTok == (mdToken)CorConstants.CorTokenType.mdtString) { stringObject = _compilation.NodeFactory.SerializedStringObject(""); } else { object literal = methodIL.GetObject((int)metaTok); stringObject = _compilation.NodeFactory.SerializedStringObject((string)literal); } ppValue = (void*)ObjectToHandle(stringObject); return stringObject.RepresentsIndirectionCell ? InfoAccessType.IAT_PVALUE : InfoAccessType.IAT_VALUE; } enum RhEHClauseKind { RH_EH_CLAUSE_TYPED = 0, RH_EH_CLAUSE_FAULT = 1, RH_EH_CLAUSE_FILTER = 2 } private ObjectNode.ObjectData EncodeEHInfo() { var builder = new ObjectDataBuilder(); builder.RequireInitialAlignment(1); int totalClauses = _ehClauses.Length; // Count the number of special markers that will be needed for (int i = 1; i < _ehClauses.Length; i++) { ref CORINFO_EH_CLAUSE clause = ref _ehClauses[i]; ref CORINFO_EH_CLAUSE previousClause = ref _ehClauses[i - 1]; if ((previousClause.TryOffset == clause.TryOffset) && (previousClause.TryLength == clause.TryLength) && ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_SAMETRY) == 0)) { totalClauses++; } } builder.EmitCompressedUInt((uint)totalClauses); for (int i = 0; i < _ehClauses.Length; i++) { ref CORINFO_EH_CLAUSE clause = ref _ehClauses[i]; if (i > 0) { ref CORINFO_EH_CLAUSE previousClause = ref _ehClauses[i - 1]; // If the previous clause has same try offset and length as the current clause, // but belongs to a different try block (CORINFO_EH_CLAUSE_SAMETRY is not set), // emit a special marker to allow runtime distinguish this case. if ((previousClause.TryOffset == clause.TryOffset) && (previousClause.TryLength == clause.TryLength) && ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_SAMETRY) == 0)) { builder.EmitCompressedUInt(0); builder.EmitCompressedUInt((uint)RhEHClauseKind.RH_EH_CLAUSE_FAULT); builder.EmitCompressedUInt(0); } } RhEHClauseKind clauseKind; if (((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FAULT) != 0) || ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FINALLY) != 0)) { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_FAULT; } else if ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FILTER) != 0) { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_FILTER; } else { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_TYPED; } builder.EmitCompressedUInt((uint)clause.TryOffset); // clause.TryLength returned by the JIT is actually end offset... 
// https://github.com/dotnet/runtime/issues/5282 int tryLength = (int)clause.TryLength - (int)clause.TryOffset; builder.EmitCompressedUInt((uint)((tryLength << 2) | (int)clauseKind)); switch (clauseKind) { case RhEHClauseKind.RH_EH_CLAUSE_TYPED: { builder.EmitCompressedUInt(clause.HandlerOffset); var methodIL = (MethodIL)HandleToObject((IntPtr)_methodScope); var type = (TypeDesc)methodIL.GetObject((int)clause.ClassTokenOrOffset); // Once https://github.com/dotnet/corert/issues/3460 is done, this should be an assert. // Throwing InvalidProgram is not great, but we want to do *something* if this happens // because doing nothing means problems at runtime. This is not worth piping // a new exception with a fancy message for. if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) ThrowHelper.ThrowInvalidProgramException(); var typeSymbol = _compilation.NecessaryTypeSymbolIfPossible(type); RelocType rel = (_compilation.NodeFactory.Target.IsWindows) ? RelocType.IMAGE_REL_BASED_ABSOLUTE : RelocType.IMAGE_REL_BASED_RELPTR32; if (_compilation.NodeFactory.Target.Abi == TargetAbi.Jit) rel = RelocType.IMAGE_REL_BASED_REL32; builder.EmitReloc(typeSymbol, rel); } break; case RhEHClauseKind.RH_EH_CLAUSE_FAULT: builder.EmitCompressedUInt(clause.HandlerOffset); break; case RhEHClauseKind.RH_EH_CLAUSE_FILTER: builder.EmitCompressedUInt(clause.HandlerOffset); builder.EmitCompressedUInt(clause.ClassTokenOrOffset); break; } } return builder.ToObjectData(); } private void setVars(CORINFO_METHOD_STRUCT_* ftn, uint cVars, NativeVarInfo* vars) { var methodIL = (MethodIL)HandleToObject((IntPtr)_methodScope); MethodSignature sig = methodIL.OwningMethod.Signature; int numLocals = methodIL.GetLocals().Length; ArrayBuilder<DebugVarRangeInfo>[] debugVarInfoBuilders = new ArrayBuilder<DebugVarRangeInfo>[(sig.IsStatic ? 0 : 1) + sig.Length + numLocals]; for (uint i = 0; i < cVars; i++) { uint varNumber = vars[i].varNumber; if (varNumber < debugVarInfoBuilders.Length) debugVarInfoBuilders[varNumber].Add(new DebugVarRangeInfo(vars[i].startOffset, vars[i].endOffset, vars[i].varLoc)); } var debugVarInfos = new ArrayBuilder<DebugVarInfo>(); for (uint i = 0; i < debugVarInfoBuilders.Length; i++) { if (debugVarInfoBuilders[i].Count > 0) { debugVarInfos.Add(new DebugVarInfo(i, debugVarInfoBuilders[i].ToArray())); } } _debugVarInfos = debugVarInfos.ToArray(); // The JIT gave ownership of this array to us, so we need to free it. freeArray(vars); } /// <summary> /// Create a DebugLocInfo, which is a table from native offset to source line, /// using native-to-IL offset (pMap) and IL-to-source line (_sequencePoints). /// </summary> private void setBoundaries(CORINFO_METHOD_STRUCT_* ftn, uint cMap, OffsetMapping* pMap) { Debug.Assert(_debugLocInfos == null); int largestILOffset = 0; // All epilogues point to the largest IL offset. for (int i = 0; i < cMap; i++) { OffsetMapping nativeToILInfo = pMap[i]; int currentILOffset = (int)nativeToILInfo.ilOffset; if (currentILOffset > largestILOffset) // Special offsets are negative. 
{ largestILOffset = currentILOffset; } } ArrayBuilder<DebugLocInfo> debugLocInfos = new ArrayBuilder<DebugLocInfo>(); for (int i = 0; i < cMap; i++) { OffsetMapping* nativeToILInfo = &pMap[i]; int ilOffset = (int)nativeToILInfo->ilOffset; switch (ilOffset) { case (int)MappingTypes.PROLOG: ilOffset = 0; break; case (int)MappingTypes.EPILOG: ilOffset = largestILOffset; break; case (int)MappingTypes.NO_MAPPING: continue; } debugLocInfos.Add(new DebugLocInfo((int)nativeToILInfo->nativeOffset, ilOffset)); } if (debugLocInfos.Count > 0) { _debugLocInfos = debugLocInfos.ToArray(); } freeArray(pMap); } private void SetDebugInformation(IMethodNode methodCodeNodeNeedingCode, MethodIL methodIL) { _debugInfo = _compilation.GetDebugInfo(methodIL); } private ISymbolNode GetGenericLookupHelper(CORINFO_RUNTIME_LOOKUP_KIND runtimeLookupKind, ReadyToRunHelperId helperId, object helperArgument) { if (runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_THISOBJ || runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_CLASSPARAM) { return _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup(helperId, helperArgument, MethodBeingCompiled.OwningType); } Debug.Assert(runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_METHODPARAM); return _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup(helperId, helperArgument, MethodBeingCompiled); } private CorInfoHelpFunc getCastingHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool fThrowing) { TypeDesc type = HandleToObject(pResolvedToken.hClass); CorInfoHelpFunc helper; if (type.IsCanonicalDefinitionType(CanonicalFormKind.Any)) { // In shared code just use the catch-all helper for type variables, as the same // code may be used for interface/array/class instantiations // // We may be able to take advantage of constraints to select a specialized helper. // This optimization does not seem to be warranted at the moment. helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; } else if (type.IsInterface) { // If it is an interface, use the fast interface helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE; } else if (type.IsArray) { // If it is an array, use the fast array helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY; } else if (type.IsDefType) { helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS; #if !SUPPORT_JIT // When this assert is hit, we'll have to do something with the class checks in RyuJIT // Frozen strings might end up failing inlined checks generated by RyuJIT for sealed classes. 
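// Note on the helper selection above: the throwing cast helpers are kept at a fixed enum distance from their // non-throwing counterparts, so the fThrowing path below simply adds the (CHKCASTANY - ISINSTANCEOFANY) delta // to the helper chosen here; the Debug.Asserts verify that the interface/array/class pairs keep that spacing. 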
Debug.Assert(!_compilation.NodeFactory.CompilationModuleGroup.CanHaveReferenceThroughImportTable); #endif } else { // Otherwise, use the slow helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; } if (fThrowing) { int delta = CorInfoHelpFunc.CORINFO_HELP_CHKCASTANY - CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTINTERFACE); Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTARRAY); Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS); helper += delta; } return helper; } private CorInfoHelpFunc getNewHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, ref bool pHasSideEffects) { TypeDesc type = HandleToObject(pResolvedToken.hClass); Debug.Assert(!type.IsString && !type.IsArray && !type.IsCanonicalDefinitionType(CanonicalFormKind.Any)); pHasSideEffects = type.HasFinalizer; if (type.RequiresAlign8()) { if (type.HasFinalizer) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_FINALIZE; if (type.IsValueType) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_VC; return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8; } if (type.HasFinalizer) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_FINALIZE; return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST; } private CorInfoHelpFunc getNewArrHelper(CORINFO_CLASS_STRUCT_* arrayCls) { TypeDesc type = HandleToObject(arrayCls); Debug.Assert(type.IsArray); if (type.RequiresAlign8()) return CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_ALIGN8; return CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_VC; } private IMethodNode GetMethodEntrypoint(CORINFO_MODULE_STRUCT_* pScope, MethodDesc method) { bool isUnboxingThunk = method.IsUnboxingThunk(); if (isUnboxingThunk) { method = method.GetUnboxedMethod(); } if (method.HasInstantiation || method.OwningType.HasInstantiation) { MethodIL methodIL = (MethodIL)HandleToObject((IntPtr)pScope); _compilation.DetectGenericCycles(methodIL.OwningMethod, method); } return _compilation.NodeFactory.MethodEntrypoint(method, isUnboxingThunk); } private static bool IsTypeSpecForTypicalInstantiation(TypeDesc t) { Instantiation inst = t.Instantiation; for (int i = 0; i < inst.Length; i++) { var arg = inst[i] as SignatureTypeVariable; if (arg == null || arg.Index != i) return false; } return true; } private void getCallInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. MemoryHelper.FillMemory((byte*)pResult, 0xcc, Marshal.SizeOf<CORINFO_CALL_INFO>()); #endif MethodDesc method = HandleToObject(pResolvedToken.hMethod); // Spec says that a callvirt lookup ignores static methods. Since static methods // can't have the exact same signature as instance methods, a lookup that found // a static method would have never found an instance method. 
if (method.Signature.IsStatic && (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) != 0) { throw new BadImageFormatException(); } TypeDesc exactType = HandleToObject(pResolvedToken.hClass); TypeDesc constrainedType = null; if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) != 0 && pConstrainedResolvedToken != null) { constrainedType = HandleToObject(pConstrainedResolvedToken->hClass); } bool resolvedConstraint = false; bool forceUseRuntimeLookup = false; bool targetIsFatFunctionPointer = false; bool useFatCallTransform = false; MethodDesc methodAfterConstraintResolution = method; if (constrainedType == null) { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_NO_THIS_TRANSFORM; } else { // We have a "constrained." call. Try a partial resolve of the constraint call. Note that this // will not necessarily resolve the call exactly, since we might be compiling // shared generic code - it may just resolve it to a candidate suitable for // JIT compilation, and require a runtime lookup for the actual code pointer // to call. MethodDesc directMethod = constrainedType.GetClosestDefType().TryResolveConstraintMethodApprox(exactType, method, out forceUseRuntimeLookup); if (directMethod == null && constrainedType.IsEnum) { // Constrained calls to methods on enum types resolve to System.Enum's methods. System.Enum is a reference // type though, so we would fail to resolve and box. We have a special path for those to avoid boxing. directMethod = _compilation.TypeSystemContext.TryResolveConstrainedEnumMethod(constrainedType, method); } if (directMethod != null) { // Either // 1. no constraint resolution at compile time (!directMethod) // OR 2. no code sharing lookup in call // OR 3. we have resolved to an instantiating stub methodAfterConstraintResolution = directMethod; Debug.Assert(!methodAfterConstraintResolution.OwningType.IsInterface); resolvedConstraint = true; pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_NO_THIS_TRANSFORM; exactType = constrainedType; } else if (constrainedType.IsValueType) { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_BOX_THIS; } else { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_DEREF_THIS; } } MethodDesc targetMethod = methodAfterConstraintResolution; // // Initialize callee context used for inlining and instantiation arguments // if (targetMethod.HasInstantiation) { pResult->contextHandle = contextFromMethod(targetMethod); pResult->exactContextNeedsRuntimeLookup = targetMethod.IsSharedByGenericInstantiations; } else { pResult->contextHandle = contextFromType(exactType); pResult->exactContextNeedsRuntimeLookup = exactType.IsCanonicalSubtype(CanonicalFormKind.Any); // Use main method as the context as long as the methods are called on the same type if (pResult->exactContextNeedsRuntimeLookup && pResolvedToken.tokenContext == contextFromMethodBeingCompiled() && constrainedType == null && exactType == MethodBeingCompiled.OwningType && // But don't allow inlining into generic methods since the generic context won't be the same. // The scanner won't be able to predict such inlining. 
See https://github.com/dotnet/runtimelab/pull/489 !MethodBeingCompiled.HasInstantiation) { var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); var rawMethod = (MethodDesc)methodIL.GetMethodILDefinition().GetObject((int)pResolvedToken.token); if (IsTypeSpecForTypicalInstantiation(rawMethod.OwningType)) { pResult->contextHandle = contextFromMethodBeingCompiled(); } } } // // Determine whether to perform direct call // bool directCall = false; bool resolvedCallVirt = false; if (targetMethod.Signature.IsStatic) { // Static methods are always direct calls directCall = true; } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) == 0 || resolvedConstraint) { directCall = true; } else { if (!targetMethod.IsVirtual || // Final/sealed has no meaning for interfaces, but lets us devirtualize otherwise !targetMethod.OwningType.IsInterface && (targetMethod.IsFinal || targetMethod.OwningType.IsSealed())) { resolvedCallVirt = true; directCall = true; } } pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = false; bool allowInstParam = (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_ALLOWINSTPARAM) != 0; if (directCall && targetMethod.IsAbstract) { ThrowHelper.ThrowBadImageFormatException(); } if (directCall && !allowInstParam && targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific).RequiresInstArg()) { // JIT needs a single address to call this method but the method needs a hidden argument. // We need a fat function pointer for this that captures both things. targetIsFatFunctionPointer = true; // JIT won't expect fat function pointers unless this is e.g. delegate creation Debug.Assert((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) != 0); pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; if (pResult->exactContextNeedsRuntimeLookup) { pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = true; pResult->codePointerOrStubLookup.lookupKind.runtimeLookupFlags = 0; pResult->codePointerOrStubLookup.runtimeLookup.indirections = CORINFO.USEHELPER; // Do not bother computing the runtime lookup if we are inlining. The JIT is going // to abort the inlining attempt anyway. if (pResolvedToken.tokenContext == contextFromMethodBeingCompiled()) { MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); pResult->codePointerOrStubLookup.lookupKind.runtimeLookupKind = GetGenericRuntimeLookupKind(contextMethod); pResult->codePointerOrStubLookup.lookupKind.runtimeLookupFlags = (ushort)ReadyToRunHelperId.MethodEntry; pResult->codePointerOrStubLookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(GetRuntimeDeterminedObjectForToken(ref pResolvedToken)); } else { pResult->codePointerOrStubLookup.lookupKind.runtimeLookupKind = CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_NOT_SUPPORTED; } } else { pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.FatFunctionPointer(targetMethod)); } } else if (directCall && resolvedConstraint && pResult->exactContextNeedsRuntimeLookup) { // We want to do a direct call to a shared method on a valuetype. We need to provide // a generic context, but the JitInterface doesn't provide a way for us to do it from here. // So we do the next best thing and ask RyuJIT to look up a fat pointer. 
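// (A fat function pointer is, roughly, a tagged pointer - lowest bit set - to a pair of entry point and generic // dictionary rather than to code directly; a caller that sees the tag loads the dictionary and passes it as the // hidden instantiation argument, so shared code still receives its generic context.) 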
pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_VALUE; pResult->nullInstanceCheck = true; // We have the canonical version of the method - find the runtime determined version. // This is simplified because we know the method is on a valuetype. Debug.Assert(targetMethod.OwningType.IsValueType); TypeDesc runtimeDeterminedConstrainedType = (TypeDesc)GetRuntimeDeterminedObjectForToken(ref *pConstrainedResolvedToken); if (forceUseRuntimeLookup) { // The below logic would incorrectly resolve the lookup into the first match we found, // but there was a compile-time ambiguity due to shared code. The correct fix should // use the ConstrainedMethodUseLookupResult dictionary entry so that the exact // dispatch can be computed with the help of the generic dictionary. // We fail the compilation here to avoid bad codegen. This is not actually an invalid program. // https://github.com/dotnet/runtimelab/issues/1431 ThrowHelper.ThrowInvalidProgramException(); } MethodDesc targetOfLookup; if (runtimeDeterminedConstrainedType.IsRuntimeDeterminedType) targetOfLookup = _compilation.TypeSystemContext.GetMethodForRuntimeDeterminedType(targetMethod.GetTypicalMethodDefinition(), (RuntimeDeterminedType)runtimeDeterminedConstrainedType); else targetOfLookup = _compilation.TypeSystemContext.GetMethodForInstantiatedType(targetMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedConstrainedType); if (targetOfLookup.HasInstantiation) { var methodToGetInstantiation = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); targetOfLookup = targetOfLookup.MakeInstantiatedMethod(methodToGetInstantiation.Instantiation); } Debug.Assert(targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific) == targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific)); ComputeLookup(ref pResolvedToken, targetOfLookup, ReadyToRunHelperId.MethodEntry, ref pResult->codePointerOrStubLookup); targetIsFatFunctionPointer = true; useFatCallTransform = true; } else if (directCall) { bool referencingArrayAddressMethod = false; if (targetMethod.IsIntrinsic) { // If this is an intrinsic method with a callsite-specific expansion, this will replace // the method with a method the intrinsic expands into. If it's not the special intrinsic, // method stays unchanged. var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); targetMethod = _compilation.ExpandIntrinsicForCallsite(targetMethod, methodIL.OwningMethod); // For multidim array Address method, we pretend the method requires a hidden instantiation argument // (even though it doesn't need one). We'll actually swap the method out for a different one with // a matching calling convention later. See ArrayMethod for a description. referencingArrayAddressMethod = targetMethod.IsArrayAddressMethod(); } pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL; TypeDesc owningType = targetMethod.OwningType; if (owningType.IsString && targetMethod.IsConstructor) { // Calling a string constructor doesn't call the actual constructor. pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.StringAllocator(targetMethod) ); } else if (owningType.IsArray && targetMethod.IsConstructor) { // Constructors on arrays are special and don't actually have entrypoints. // That would be fine by itself and wouldn't need special casing. But // constructors on SzArray have a weird property that causes them not to have canonical forms. 
// int[][] has a .ctor(int32,int32) to construct the jagged array in one go, but its canonical // form of __Canon[] doesn't have the two-parameter constructor. The canonical form would need // to have an unlimited number of constructors to cover stuff like "int[][][][][][]..." pResult->codePointerOrStubLookup.constLookup = default; } else if (pResult->exactContextNeedsRuntimeLookup) { // Nothing to do... The generic handle lookup gets embedded in to the codegen // during the jitting of the call. // (Note: The generic lookup in R2R is performed by a call to a helper at runtime, not by // codegen emitted at crossgen time) targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); Debug.Assert(!forceUseRuntimeLookup); pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( GetMethodEntrypoint(pResolvedToken.tokenScope, targetMethod) ); } else { MethodDesc concreteMethod = targetMethod; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = _compilation.NodeFactory.MethodGenericDictionary(concreteMethod); } else if (targetMethod.RequiresInstMethodTableArg() || referencingArrayAddressMethod) { // Ask for a constructed type symbol because we need the vtable to get to the dictionary instParam = _compilation.NodeFactory.ConstructedTypeSymbol(concreteMethod.OwningType); } if (instParam != null) { pResult->instParamLookup = CreateConstLookupToSymbol(instParam); } pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( GetMethodEntrypoint(pResolvedToken.tokenScope, targetMethod) ); } pResult->nullInstanceCheck = resolvedCallVirt; } else if (targetMethod.HasInstantiation) { // Generic virtual method call support pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_LDVIRTFTN; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_VALUE; pResult->nullInstanceCheck = true; MethodDesc targetOfLookup = _compilation.GetTargetOfGenericVirtualMethodCall((MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken)); _compilation.DetectGenericCycles( ((MethodILScope)HandleToObject((IntPtr)pResolvedToken.tokenScope)).OwningMethod, targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific)); ComputeLookup(ref pResolvedToken, targetOfLookup, ReadyToRunHelperId.MethodHandle, ref pResult->codePointerOrStubLookup); // RyuJIT will assert if we report CORINFO_CALLCONV_PARAMTYPE for a result of a ldvirtftn // We don't need an instantiation parameter, so let's just not report it. Might be nice to // move that assert to some place later though. 
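// (Generic virtual methods cannot be dispatched through an ordinary vtable slot: the target depends on both the // runtime type of 'this' and the method instantiation. The lookup above yields a runtime method handle, and the // code pointer is obtained at the callsite via the GVM lookup helper - hence the fat function pointer below.) 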
targetIsFatFunctionPointer = true; } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) == 0 && targetMethod.OwningType.IsInterface) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_STUB; if (pResult->exactContextNeedsRuntimeLookup) { ComputeLookup(ref pResolvedToken, GetRuntimeDeterminedObjectForToken(ref pResolvedToken), ReadyToRunHelperId.VirtualDispatchCell, ref pResult->codePointerOrStubLookup); Debug.Assert(pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup); } else { pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = false; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_PVALUE; pResult->codePointerOrStubLookup.constLookup.addr = (void*)ObjectToHandle( _compilation.NodeFactory.InterfaceDispatchCell(targetMethod #if !SUPPORT_JIT , _compilation.NameMangler.GetMangledMethodName(MethodBeingCompiled).ToString() #endif )); } pResult->nullInstanceCheck = false; } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) == 0 // Canonically-equivalent types have the same vtable layout. Check the canonical form. // We don't want to accidentally ask about Foo<object, __Canon> that may or may not // be available to ask vtable questions about. // This can happen in inlining that the scanner didn't expect. && _compilation.HasFixedSlotVTable(targetMethod.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific))) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_VTABLE; pResult->nullInstanceCheck = true; } else { ReadyToRunHelperId helperId; if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) != 0) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_LDVIRTFTN; helperId = ReadyToRunHelperId.ResolveVirtualFunction; } else { // CORINFO_CALL_CODE_POINTER tells the JIT that this is indirect // call that should not be inlined. pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; helperId = ReadyToRunHelperId.VirtualCall; } // If this is a non-interface call, we actually don't need a runtime lookup to find the target. // We don't even need to keep track of the runtime-determined method being called because the system ensures // that if e.g. Foo<__Canon>.GetHashCode is needed and we're generating a dictionary for Foo<string>, // Foo<string>.GetHashCode is needed too. if (pResult->exactContextNeedsRuntimeLookup && targetMethod.OwningType.IsInterface) { // We need JitInterface changes to fully support this. // If this is LDVIRTFTN of an interface method that is part of a verifiable delegate creation sequence, // RyuJIT is not going to use this value. Debug.Assert(helperId == ReadyToRunHelperId.ResolveVirtualFunction); pResult->exactContextNeedsRuntimeLookup = false; pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ExternSymbol("NYI_LDVIRTFTN")); } else { pResult->exactContextNeedsRuntimeLookup = false; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // Get the slot defining method to make sure our virtual method use tracking gets this right. // For normal C# code the targetMethod will always be newslot. MethodDesc slotDefiningMethod = targetMethod.IsNewSlot ? 
targetMethod : MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethod); pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.ReadyToRunHelper(helperId, slotDefiningMethod)); } // The current CoreRT ReadyToRun helpers do not handle null thisptr - ask the JIT to emit explicit null checks // TODO: Optimize this pResult->nullInstanceCheck = true; } pResult->hMethod = ObjectToHandle(targetMethod); pResult->accessAllowed = CorInfoIsAccessAllowedResult.CORINFO_ACCESS_ALLOWED; // We're pretty much done at this point. Let's grab the rest of the information that the jit is going to // need. pResult->classFlags = getClassAttribsInternal(targetMethod.OwningType); pResult->methodFlags = getMethodAttribsInternal(targetMethod); targetIsFatFunctionPointer |= (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) != 0 && !(pResult->kind == CORINFO_CALL_KIND.CORINFO_CALL); Get_CORINFO_SIG_INFO(targetMethod, &pResult->sig, scope: null, targetIsFatFunctionPointer); if (useFatCallTransform) { pResult->sig.flags |= CorInfoSigInfoFlags.CORINFO_SIGFLAG_FAT_CALL; } if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_VERIFICATION) != 0) { if (pResult->hMethod != pResolvedToken.hMethod) { pResult->verMethodFlags = getMethodAttribsInternal(targetMethod); Get_CORINFO_SIG_INFO(targetMethod, &pResult->verSig, scope: null); } else { pResult->verMethodFlags = pResult->methodFlags; pResult->verSig = pResult->sig; } } pResult->_wrapperDelegateInvoke = 0; } private CORINFO_CLASS_STRUCT_* embedClassHandle(CORINFO_CLASS_STRUCT_* handle, ref void* ppIndirection) { TypeDesc type = HandleToObject(handle); ISymbolNode typeHandleSymbol = _compilation.NecessaryTypeSymbolIfPossible(type); CORINFO_CLASS_STRUCT_* result = (CORINFO_CLASS_STRUCT_*)ObjectToHandle(typeHandleSymbol); if (typeHandleSymbol.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private void embedGenericHandle(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool fEmbedParent, ref CORINFO_GENERICHANDLE_RESULT pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. 
fixed (CORINFO_GENERICHANDLE_RESULT* tmp = &pResult) MemoryHelper.FillMemory((byte*)tmp, 0xcc, Marshal.SizeOf<CORINFO_GENERICHANDLE_RESULT>()); #endif ReadyToRunHelperId helperId = ReadyToRunHelperId.Invalid; object target = null; if (!fEmbedParent && pResolvedToken.hMethod != null) { MethodDesc md = HandleToObject(pResolvedToken.hMethod); TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_METHOD; Debug.Assert(md.OwningType == td); pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)ObjectToHandle(md); if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken) helperId = ReadyToRunHelperId.MethodHandle; else { Debug.Assert(pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Method); helperId = ReadyToRunHelperId.MethodDictionary; } target = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); } else if (!fEmbedParent && pResolvedToken.hField != null) { FieldDesc fd = HandleToObject(pResolvedToken.hField); TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_FIELD; pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)pResolvedToken.hField; Debug.Assert(pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken); helperId = ReadyToRunHelperId.FieldHandle; target = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); } else { TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_CLASS; pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)pResolvedToken.hClass; object obj = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); target = obj as TypeDesc; if (target == null) { Debug.Assert(fEmbedParent); if (obj is MethodDesc objAsMethod) { target = objAsMethod.OwningType; } else { Debug.Assert(obj is FieldDesc); target = ((FieldDesc)obj).OwningType; } } if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_NewObj || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Newarr || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Box || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Constrained) { helperId = ReadyToRunHelperId.TypeHandle; } else if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Casting) { helperId = ReadyToRunHelperId.TypeHandleForCasting; } else if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken) { helperId = _compilation.GetLdTokenHelperForType(td); } else { helperId = ReadyToRunHelperId.NecessaryTypeHandle; } } Debug.Assert(pResult.compileTimeHandle != null); ComputeLookup(ref pResolvedToken, target, helperId, ref pResult.lookup); } private CORINFO_METHOD_STRUCT_* embedMethodHandle(CORINFO_METHOD_STRUCT_* handle, ref void* ppIndirection) { MethodDesc method = HandleToObject(handle); ISymbolNode methodHandleSymbol = _compilation.NodeFactory.RuntimeMethodHandle(method); CORINFO_METHOD_STRUCT_* result = (CORINFO_METHOD_STRUCT_*)ObjectToHandle(methodHandleSymbol); if (methodHandleSymbol.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private void getMethodVTableOffset(CORINFO_METHOD_STRUCT_* method, ref uint offsetOfIndirection, ref uint offsetAfterIndirection, ref bool isRelative) { MethodDesc methodDesc = HandleToObject(method); int pointerSize = _compilation.TypeSystemContext.Target.PointerSize; offsetOfIndirection = (uint)CORINFO_VIRTUALCALL_NO_CHUNK.Value; isRelative = false; 
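// The vtable here is flat rather than chunked (hence CORINFO_VIRTUALCALL_NO_CHUNK above), so the final offset // reported below is simply the start of the vtable within the MethodTable plus slot * pointerSize. 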
// Normalize to the slot defining method. We don't have slot information for the overrides. methodDesc = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(methodDesc); Debug.Assert(!methodDesc.CanMethodBeInSealedVTable()); // Avoid asking about slots on types like Foo<object, __Canon>. We might not have that information. // Canonically-equivalent types have the same slots, so ask for Foo<__Canon, __Canon>. methodDesc = methodDesc.GetCanonMethodTarget(CanonicalFormKind.Specific); int slot = VirtualMethodSlotHelper.GetVirtualMethodSlot(_compilation.NodeFactory, methodDesc, methodDesc.OwningType); if (slot == -1) { throw new InvalidOperationException(methodDesc.ToString()); } offsetAfterIndirection = (uint)(EETypeNode.GetVTableOffset(pointerSize) + slot * pointerSize); } private void expandRawHandleIntrinsic(ref CORINFO_RESOLVED_TOKEN pResolvedToken, ref CORINFO_GENERICHANDLE_RESULT pResult) { // Resolved token as a potentially RuntimeDetermined object. MethodDesc method = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); switch (method.Name) { case "EETypePtrOf": case "MethodTableOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.TypeHandle, ref pResult.lookup); break; case "DefaultConstructorOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.DefaultConstructor, ref pResult.lookup); break; case "AllocatorOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.ObjectAllocator, ref pResult.lookup); break; } } private uint getMethodAttribs(CORINFO_METHOD_STRUCT_* ftn) { return getMethodAttribsInternal(HandleToObject(ftn)); } private void* getMethodSync(CORINFO_METHOD_STRUCT_* ftn, ref void* ppIndirection) { MethodDesc method = HandleToObject(ftn); TypeDesc type = method.OwningType; ISymbolNode methodSync = _compilation.NecessaryTypeSymbolIfPossible(type); void* result = (void*)ObjectToHandle(methodSync); if (methodSync.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private unsafe HRESULT allocPgoInstrumentationBySchema(CORINFO_METHOD_STRUCT_* ftnHnd, PgoInstrumentationSchema* pSchema, uint countSchemaItems, byte** pInstrumentationData) { throw new NotImplementedException("allocPgoInstrumentationBySchema"); } private CORINFO_CLASS_STRUCT_* getLikelyClass(CORINFO_METHOD_STRUCT_* ftnHnd, CORINFO_CLASS_STRUCT_* baseHnd, uint IlOffset, ref uint pLikelihood, ref uint pNumberOfClasses) { return null; } private void getAddressOfPInvokeTarget(CORINFO_METHOD_STRUCT_* method, ref CORINFO_CONST_LOOKUP pLookup) { MethodDesc md = HandleToObject(method); string externName = _compilation.PInvokeILProvider.GetDirectCallExternName(md); pLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ExternSymbol(externName)); } private void getGSCookie(IntPtr* pCookieVal, IntPtr** ppCookieVal) { // TODO: fully implement GS cookies if (pCookieVal != null) { if (PointerSize == 4) { *pCookieVal = (IntPtr)0x3F796857; } else { *pCookieVal = (IntPtr)0x216D6F6D202C6948; } *ppCookieVal = null; } else { throw new NotImplementedException("getGSCookie"); } } private bool pInvokeMarshalingRequired(CORINFO_METHOD_STRUCT_* handle, CORINFO_SIG_INFO* callSiteSig) { // calli is covered by convertPInvokeCalliToCall if (handle == null) { #if DEBUG MethodSignature methodSignature = (MethodSignature)HandleToObject((IntPtr)callSiteSig->pSig); MethodDesc stub = _compilation.PInvokeILProvider.GetCalliStub( methodSignature, 
((MetadataType)HandleToObject(callSiteSig->scope).OwningMethod.OwningType).Module); Debug.Assert(!IsPInvokeStubRequired(stub)); #endif return false; } MethodDesc method = HandleToObject(handle); if (method.IsRawPInvoke()) return false; // Stub is required to trigger precise static constructor TypeDesc owningType = method.OwningType; if (_compilation.HasLazyStaticConstructor(owningType) && !((MetadataType)owningType).IsBeforeFieldInit) return true; // We could have given back the PInvoke stub IL to the JIT and let it inline it, without // checking whether there is any stub required. Save the JIT from doing the inlining by checking upfront. return IsPInvokeStubRequired(method); } private bool convertPInvokeCalliToCall(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool mustConvert) { var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); // Suppress recursive expansion of calli in marshaling stubs if (methodIL is Internal.IL.Stubs.PInvokeILStubMethodIL) return false; MethodSignature signature = (MethodSignature)methodIL.GetObject((int)pResolvedToken.token); if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) == 0) return false; MethodDesc stub = _compilation.PInvokeILProvider.GetCalliStub( signature, ((MetadataType)methodIL.OwningMethod.OwningType).Module); if (!mustConvert && !IsPInvokeStubRequired(stub)) return false; pResolvedToken.hMethod = ObjectToHandle(stub); pResolvedToken.hClass = ObjectToHandle(stub.OwningType); return true; } private bool IsPInvokeStubRequired(MethodDesc method) { if (_compilation.GetMethodIL(method) is Internal.IL.Stubs.PInvokeILStubMethodIL stub) return stub.IsStubRequired; // This path is taken for PInvokes replaced by RemovingILProvider return true; } private int SizeOfPInvokeTransitionFrame { get { // struct PInvokeTransitionFrame: // #ifdef _TARGET_ARM_ // m_ChainPointer // #endif // m_RIP // m_FramePointer // m_pThread // m_Flags + align (no align for ARM64 that has 64 bit m_Flags) // m_PreservedRegs - RSP // No need to save other preserved regs because the JIT ensures that there are // no live GC references in callee saved registers around the PInvoke callsite. int size = 5 * this.PointerSize; if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) size += this.PointerSize; // m_ChainPointer return size; } } private bool canGetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig) { throw new NotImplementedException("canGetCookieForPInvokeCalliSig"); } private void classMustBeLoadedBeforeCodeIsRun(CORINFO_CLASS_STRUCT_* cls) { } private void setEHcount(uint cEH) { _ehClauses = new CORINFO_EH_CLAUSE[cEH]; } private void setEHinfo(uint EHnumber, ref CORINFO_EH_CLAUSE clause) { _ehClauses[EHnumber] = clause; } private void reportInliningDecision(CORINFO_METHOD_STRUCT_* inlinerHnd, CORINFO_METHOD_STRUCT_* inlineeHnd, CorInfoInline inlineResult, byte* reason) { } private void updateEntryPointForTailCall(ref CORINFO_CONST_LOOKUP entryPoint) { } private int* getAddrOfCaptureThreadGlobal(ref void* ppIndirection) { ppIndirection = null; return (int*)ObjectToHandle(_compilation.NodeFactory.ExternSymbol("RhpTrapThreads")); } private void getFieldInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. 
MemoryHelper.FillMemory((byte*)pResult, 0xcc, Marshal.SizeOf<CORINFO_FIELD_INFO>()); #endif Debug.Assert(((int)flags & ((int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_GET | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_SET | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_ADDRESS | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_INIT_ARRAY)) != 0); var field = HandleToObject(pResolvedToken.hField); CORINFO_FIELD_ACCESSOR fieldAccessor; CORINFO_FIELD_FLAGS fieldFlags = (CORINFO_FIELD_FLAGS)0; uint fieldOffset = (field.IsStatic && field.HasRva ? 0xBAADF00D : (uint)field.Offset.AsInt); if (field.IsStatic) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_STATIC; if (field.HasRva) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_UNMANAGED; // TODO: Handle the case when the RVA is in the TLS range fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_RVA_ADDRESS; // We are not going through a helper. The constructor has to be triggered explicitly. if (_compilation.HasLazyStaticConstructor(field.OwningType)) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_INITCLASS; } } else if (field.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // The JIT wants to know how to access a static field on a generic type. We need a runtime lookup. fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_READYTORUN_HELPER; pResult->helper = CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE; // Don't try to compute the runtime lookup if we're inlining. The JIT is going to abort the inlining // attempt anyway. if (pResolvedToken.tokenContext == contextFromMethodBeingCompiled()) { MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); FieldDesc runtimeDeterminedField = (FieldDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); ReadyToRunHelperId helperId; // Find out what kind of base do we need to look up. if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else if (field.HasGCStaticBase) { helperId = ReadyToRunHelperId.GetGCStaticBase; } else { helperId = ReadyToRunHelperId.GetNonGCStaticBase; } // What generic context do we look up the base from. ISymbolNode helper; if (contextMethod.AcquiresInstMethodTableFromThis() || contextMethod.RequiresInstMethodTableArg()) { helper = _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup( helperId, runtimeDeterminedField.OwningType, contextMethod.OwningType); } else { Debug.Assert(contextMethod.RequiresInstMethodDescArg()); helper = _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup( helperId, runtimeDeterminedField.OwningType, contextMethod); } pResult->fieldLookup = CreateConstLookupToSymbol(helper); } } else { fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER; pResult->helper = CorInfoHelpFunc.CORINFO_HELP_READYTORUN_STATIC_BASE; ReadyToRunHelperId helperId = ReadyToRunHelperId.Invalid; CORINFO_FIELD_ACCESSOR intrinsicAccessor; if (field.IsIntrinsic && (flags & CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_GET) != 0 && (intrinsicAccessor = getFieldIntrinsic(field)) != (CORINFO_FIELD_ACCESSOR)(-1)) { fieldAccessor = intrinsicAccessor; } else if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else { helperId = field.HasGCStaticBase ? ReadyToRunHelperId.GetGCStaticBase : ReadyToRunHelperId.GetNonGCStaticBase; // // Currently, we only do this optimization for regular statics, but it // looks like it may be permissible to do this optimization for // thread statics as well. 
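// (CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN, set below for address-taken accesses, is understood to tell the // JIT that handing out a managed byref to the static's location is safe because the base address is stable.) 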
// if ((flags & CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_ADDRESS) != 0 && (fieldAccessor != CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_TLS)) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN; } } if (helperId != ReadyToRunHelperId.Invalid) { pResult->fieldLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.ReadyToRunHelper(helperId, field.OwningType)); } } } else { fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_INSTANCE; } if (field.IsInitOnly) fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_FINAL; pResult->fieldAccessor = fieldAccessor; pResult->fieldFlags = fieldFlags; pResult->fieldType = getFieldType(pResolvedToken.hField, &pResult->structType, pResolvedToken.hClass); pResult->accessAllowed = CorInfoIsAccessAllowedResult.CORINFO_ACCESS_ALLOWED; pResult->offset = fieldOffset; // TODO: We need to implement access checks for fields and methods. See JitInterface.cpp in mrtjit // and STS::AccessCheck::CanAccess. } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Runtime.InteropServices; using Internal.IL; using Internal.TypeSystem; using Internal.ReadyToRunConstants; using ILCompiler; using ILCompiler.DependencyAnalysis; using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList; #if SUPPORT_JIT using MethodCodeNode = Internal.Runtime.JitSupport.JitMethodCodeNode; using RyuJitCompilation = ILCompiler.Compilation; #endif namespace Internal.JitInterface { unsafe partial class CorInfoImpl { private const CORINFO_RUNTIME_ABI TargetABI = CORINFO_RUNTIME_ABI.CORINFO_CORERT_ABI; private uint OffsetOfDelegateFirstTarget => (uint)(4 * PointerSize); // Delegate::m_functionPointer private int SizeOfReversePInvokeTransitionFrame => 2 * PointerSize; private RyuJitCompilation _compilation; private MethodDebugInformation _debugInfo; private MethodCodeNode _methodCodeNode; private DebugLocInfo[] _debugLocInfos; private DebugVarInfo[] _debugVarInfos; private readonly UnboxingMethodDescFactory _unboxingThunkFactory = new UnboxingMethodDescFactory(); private bool _isFallbackBodyCompilation; private DependencyList _additionalDependencies; public CorInfoImpl(RyuJitCompilation compilation) : this() { _compilation = compilation; } private MethodDesc getUnboxingThunk(MethodDesc method) { return _unboxingThunkFactory.GetUnboxingMethod(method); } public void CompileMethod(MethodCodeNode methodCodeNodeNeedingCode, MethodIL methodIL = null) { _methodCodeNode = methodCodeNodeNeedingCode; _isFallbackBodyCompilation = methodIL != null; if (methodIL == null) methodIL = _compilation.GetMethodIL(MethodBeingCompiled); try { CompileMethodInternal(methodCodeNodeNeedingCode, methodIL); } finally { #if DEBUG // RyuJIT makes assumptions around the value of type symbols - in particular, it assumes // that type handles and type symbols have a 1:1 relationship. We therefore need to // make sure RyuJIT never sees a constructed and unconstructed type symbol for the // same type. This check makes sure we didn't accidentally hand out a necessary type symbol // that the compilation class didn't agree to handing out. // https://github.com/dotnet/runtimelab/issues/1128 for (int i = 0; i < _codeRelocs.Count; i++) { Debug.Assert(_codeRelocs[i].Target.GetType() != typeof(EETypeNode) || _compilation.NecessaryTypeSymbolIfPossible(((EETypeNode)_codeRelocs[i].Target).Type) == _codeRelocs[i].Target); } #endif CompileMethodCleanup(); } } private enum CFI_OPCODE { CFI_ADJUST_CFA_OFFSET, // Offset is adjusted relative to the current one. CFI_DEF_CFA_REGISTER, // New register is used to compute CFA CFI_REL_OFFSET, // Register is saved at offset from the current CFA CFI_DEF_CFA // Take address from register and add offset to it. } // Get the CFI data in the same shape as clang/LLVM generated one. 
This improves the compatibility with libunwind and other unwind solutions // - Combine in one single block for the whole prolog instead of one CFI block per assembler instruction // - Store CFA definition first // - Store all used registers in ascending order private byte[] CompressARM64CFI(byte[] blobData) { if (blobData == null || blobData.Length == 0) { return blobData; } Debug.Assert(blobData.Length % 8 == 0); short spReg = -1; int codeOffset = 0; short cfaRegister = spReg; int cfaOffset = 0; int spOffset = 0; int[] registerOffset = new int[96]; for (int i = 0; i < registerOffset.Length; i++) { registerOffset[i] = int.MinValue; } int offset = 0; while (offset < blobData.Length) { codeOffset = Math.Max(codeOffset, blobData[offset++]); CFI_OPCODE opcode = (CFI_OPCODE)blobData[offset++]; short dwarfReg = BitConverter.ToInt16(blobData, offset); offset += sizeof(short); int cfiOffset = BitConverter.ToInt32(blobData, offset); offset += sizeof(int); switch (opcode) { case CFI_OPCODE.CFI_DEF_CFA_REGISTER: cfaRegister = dwarfReg; if (spOffset != 0) { for (int i = 0; i < registerOffset.Length; i++) { if (registerOffset[i] != int.MinValue) { registerOffset[i] -= spOffset; } } cfaOffset += spOffset; spOffset = 0; } break; case CFI_OPCODE.CFI_REL_OFFSET: Debug.Assert(cfaRegister == spReg); registerOffset[dwarfReg] = cfiOffset; break; case CFI_OPCODE.CFI_ADJUST_CFA_OFFSET: if (cfaRegister != spReg) { cfaOffset += cfiOffset; } else { spOffset += cfiOffset; for (int i = 0; i < registerOffset.Length; i++) { if (registerOffset[i] != int.MinValue) { registerOffset[i] += cfiOffset; } } } break; } } using (MemoryStream cfiStream = new MemoryStream()) { int storeOffset = 0; using (BinaryWriter cfiWriter = new BinaryWriter(cfiStream)) { if (cfaRegister != -1) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write(cfaOffset != 0 ? (byte)CFI_OPCODE.CFI_DEF_CFA : (byte)CFI_OPCODE.CFI_DEF_CFA_REGISTER); cfiWriter.Write(cfaRegister); cfiWriter.Write(cfaOffset); storeOffset = cfaOffset; } else { if (cfaOffset != 0) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_ADJUST_CFA_OFFSET); cfiWriter.Write((short)-1); cfiWriter.Write(cfaOffset); } if (spOffset != 0) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_DEF_CFA); cfiWriter.Write((short)31); cfiWriter.Write(spOffset); } } for (int i = registerOffset.Length - 1; i >= 0; i--) { if (registerOffset[i] != int.MinValue) { cfiWriter.Write((byte)codeOffset); cfiWriter.Write((byte)CFI_OPCODE.CFI_REL_OFFSET); cfiWriter.Write((short)i); cfiWriter.Write(registerOffset[i] + storeOffset); } } } return cfiStream.ToArray(); } } private CORINFO_RUNTIME_LOOKUP_KIND GetLookupKindFromContextSource(GenericContextSource contextSource) { switch (contextSource) { case GenericContextSource.MethodParameter: return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_METHODPARAM; case GenericContextSource.TypeParameter: return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_CLASSPARAM; default: Debug.Assert(contextSource == GenericContextSource.ThisObject); return CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_THISOBJ; } } private void ComputeLookup(ref CORINFO_RESOLVED_TOKEN pResolvedToken, object entity, ReadyToRunHelperId helperId, ref CORINFO_LOOKUP lookup) { if (_compilation.NeedsRuntimeLookup(helperId, entity)) { lookup.lookupKind.needsRuntimeLookup = true; lookup.runtimeLookup.signature = null; // Do not bother computing the runtime lookup if we are inlining. The JIT is going // to abort the inlining attempt anyway. 
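// When the fixed-offset path below is used instead of CORINFO.USEHELPER, the lookup is described to the JIT as a // short chain of loads: starting from the generic context (this object, class MethodTable, or MethodDesc argument), // dereference 'indirections' times at offset0/offset1, with one extra indirection appended when the last dictionary // offset itself has to be loaded (IndirectLastOffset). 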
if (pResolvedToken.tokenContext != contextFromMethodBeingCompiled()) { lookup.lookupKind.runtimeLookupKind = CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_NOT_SUPPORTED; return; } MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); GenericDictionaryLookup genericLookup = _compilation.ComputeGenericLookup(contextMethod, helperId, entity); if (genericLookup.UseHelper) { lookup.runtimeLookup.indirections = CORINFO.USEHELPER; lookup.lookupKind.runtimeLookupFlags = (ushort)genericLookup.HelperId; lookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(genericLookup.HelperObject); } else { if (genericLookup.ContextSource == GenericContextSource.MethodParameter) { lookup.runtimeLookup.helper = CorInfoHelpFunc.CORINFO_HELP_RUNTIMEHANDLE_METHOD; } else { lookup.runtimeLookup.helper = CorInfoHelpFunc.CORINFO_HELP_RUNTIMEHANDLE_CLASS; } lookup.runtimeLookup.indirections = (ushort)(genericLookup.NumberOfIndirections + (genericLookup.IndirectLastOffset ? 1 : 0)); lookup.runtimeLookup.offset0 = (IntPtr)genericLookup[0]; if (genericLookup.NumberOfIndirections > 1) { lookup.runtimeLookup.offset1 = (IntPtr)genericLookup[1]; if (genericLookup.IndirectLastOffset) lookup.runtimeLookup.offset2 = IntPtr.Zero; } else if (genericLookup.IndirectLastOffset) { lookup.runtimeLookup.offset1 = IntPtr.Zero; } lookup.runtimeLookup.sizeOffset = CORINFO.CORINFO_NO_SIZE_CHECK; lookup.runtimeLookup.testForFixup = false; // TODO: this will be needed in true multifile lookup.runtimeLookup.testForNull = false; lookup.runtimeLookup.indirectFirstOffset = false; lookup.runtimeLookup.indirectSecondOffset = false; lookup.lookupKind.runtimeLookupFlags = 0; lookup.lookupKind.runtimeLookupArgs = null; } lookup.lookupKind.runtimeLookupKind = GetLookupKindFromContextSource(genericLookup.ContextSource); } else { lookup.lookupKind.needsRuntimeLookup = false; ISymbolNode constLookup = _compilation.ComputeConstantLookup(helperId, entity); lookup.constLookup = CreateConstLookupToSymbol(constLookup); } } private bool getReadyToRunHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, ref CORINFO_LOOKUP_KIND pGenericLookupKind, CorInfoHelpFunc id, ref CORINFO_CONST_LOOKUP pLookup) { switch (id) { case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_NEW: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_NEWARR_1: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_ISINSTANCEOF: case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_CHKCAST: return false; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_STATIC_BASE: { var type = HandleToObject(pResolvedToken.hClass); if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) return false; pLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ReadyToRunHelper(ReadyToRunHelperId.GetNonGCStaticBase, type)); } break; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE: { // Token == 0 means "initialize this class". We only expect RyuJIT to call it for this case. 
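// (The type is first rewritten into its shared runtime-determined form - the canonical type expressed in terms of // the generic parameters in scope - and the resulting lookup helper returns the non-GC static base, which also // ensures the class constructor has run.) 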
Debug.Assert(pResolvedToken.token == 0 && pResolvedToken.tokenScope == null); Debug.Assert(pGenericLookupKind.needsRuntimeLookup); DefType typeToInitialize = (DefType)MethodBeingCompiled.OwningType; Debug.Assert(typeToInitialize.IsCanonicalSubtype(CanonicalFormKind.Any)); DefType helperArg = typeToInitialize.ConvertToSharedRuntimeDeterminedForm(); ISymbolNode helper = GetGenericLookupHelper(pGenericLookupKind.runtimeLookupKind, ReadyToRunHelperId.GetNonGCStaticBase, helperArg); pLookup = CreateConstLookupToSymbol(helper); } break; case CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_HANDLE: { Debug.Assert(pGenericLookupKind.needsRuntimeLookup); ReadyToRunHelperId helperId = (ReadyToRunHelperId)pGenericLookupKind.runtimeLookupFlags; object helperArg = HandleToObject((IntPtr)pGenericLookupKind.runtimeLookupArgs); ISymbolNode helper = GetGenericLookupHelper(pGenericLookupKind.runtimeLookupKind, helperId, helperArg); pLookup = CreateConstLookupToSymbol(helper); } break; default: throw new NotImplementedException("ReadyToRun: " + id.ToString()); } return true; } private void getReadyToRunDelegateCtorHelper(ref CORINFO_RESOLVED_TOKEN pTargetMethod, CORINFO_CLASS_STRUCT_* delegateType, ref CORINFO_LOOKUP pLookup) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. fixed (CORINFO_LOOKUP* tmp = &pLookup) MemoryHelper.FillMemory((byte*)tmp, 0xcc, sizeof(CORINFO_LOOKUP)); #endif MethodDesc targetMethod = HandleToObject(pTargetMethod.hMethod); TypeDesc delegateTypeDesc = HandleToObject(delegateType); if (targetMethod.IsSharedByGenericInstantiations) { // If the method is not exact, fetch it as a runtime determined method. targetMethod = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pTargetMethod); } bool isLdvirtftn = pTargetMethod.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldvirtftn; DelegateCreationInfo delegateInfo = _compilation.GetDelegateCtor(delegateTypeDesc, targetMethod, isLdvirtftn); if (delegateInfo.NeedsRuntimeLookup) { pLookup.lookupKind.needsRuntimeLookup = true; MethodDesc contextMethod = methodFromContext(pTargetMethod.tokenContext); // We should not be inlining these. RyuJIT should have aborted inlining already. 
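// (DelegateCreationInfo packages the target and any thunks needed to construct the delegate; since the exact // instantiation is unknown here, it becomes the argument of a DelegateCtor dictionary entry resolved at runtime // through the generic context of the method being compiled.) 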
Debug.Assert(contextMethod == MethodBeingCompiled); pLookup.lookupKind.runtimeLookupKind = GetGenericRuntimeLookupKind(contextMethod); pLookup.lookupKind.runtimeLookupFlags = (ushort)ReadyToRunHelperId.DelegateCtor; pLookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(delegateInfo); } else { pLookup.lookupKind.needsRuntimeLookup = false; pLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ReadyToRunHelper(ReadyToRunHelperId.DelegateCtor, delegateInfo)); } } private ISymbolNode GetHelperFtnUncached(CorInfoHelpFunc ftnNum) { ReadyToRunHelper id; switch (ftnNum) { case CorInfoHelpFunc.CORINFO_HELP_THROW: id = ReadyToRunHelper.Throw; break; case CorInfoHelpFunc.CORINFO_HELP_RETHROW: id = ReadyToRunHelper.Rethrow; break; case CorInfoHelpFunc.CORINFO_HELP_USER_BREAKPOINT: id = ReadyToRunHelper.DebugBreak; break; case CorInfoHelpFunc.CORINFO_HELP_OVERFLOW: id = ReadyToRunHelper.Overflow; break; case CorInfoHelpFunc.CORINFO_HELP_RNGCHKFAIL: id = ReadyToRunHelper.RngChkFail; break; case CorInfoHelpFunc.CORINFO_HELP_FAIL_FAST: id = ReadyToRunHelper.FailFast; break; case CorInfoHelpFunc.CORINFO_HELP_THROWNULLREF: id = ReadyToRunHelper.ThrowNullRef; break; case CorInfoHelpFunc.CORINFO_HELP_THROWDIVZERO: id = ReadyToRunHelper.ThrowDivZero; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION: id = ReadyToRunHelper.ThrowArgumentOutOfRange; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_ARGUMENTEXCEPTION: id = ReadyToRunHelper.ThrowArgument; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_NOT_IMPLEMENTED: id = ReadyToRunHelper.ThrowNotImplemented; break; case CorInfoHelpFunc.CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED: id = ReadyToRunHelper.ThrowPlatformNotSupported; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF: id = ReadyToRunHelper.WriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF: id = ReadyToRunHelper.CheckedWriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_BYREF: id = ReadyToRunHelper.ByRefWriteBarrier; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF_EAX: id = ReadyToRunHelper.WriteBarrier_EAX; break; case CorInfoHelpFunc.CORINFO_HELP_ASSIGN_REF_ECX: id = ReadyToRunHelper.WriteBarrier_ECX; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF_EAX: id = ReadyToRunHelper.CheckedWriteBarrier_EAX; break; case CorInfoHelpFunc.CORINFO_HELP_CHECKED_ASSIGN_REF_ECX: id = ReadyToRunHelper.CheckedWriteBarrier_ECX; break; case CorInfoHelpFunc.CORINFO_HELP_ARRADDR_ST: id = ReadyToRunHelper.Stelem_Ref; break; case CorInfoHelpFunc.CORINFO_HELP_LDELEMA_REF: id = ReadyToRunHelper.Ldelema_Ref; break; case CorInfoHelpFunc.CORINFO_HELP_MEMSET: id = ReadyToRunHelper.MemSet; break; case CorInfoHelpFunc.CORINFO_HELP_MEMCPY: id = ReadyToRunHelper.MemCpy; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE: id = ReadyToRunHelper.GetRuntimeType; break; case CorInfoHelpFunc.CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD: id = ReadyToRunHelper.GetRuntimeMethodHandle; break; case CorInfoHelpFunc.CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD: id = ReadyToRunHelper.GetRuntimeFieldHandle; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE: id = ReadyToRunHelper.GetRuntimeTypeHandle; break; case CorInfoHelpFunc.CORINFO_HELP_ARE_TYPES_EQUIVALENT: id = ReadyToRunHelper.AreTypesEquivalent; break; case CorInfoHelpFunc.CORINFO_HELP_BOX: id = ReadyToRunHelper.Box; break; case CorInfoHelpFunc.CORINFO_HELP_BOX_NULLABLE: id = ReadyToRunHelper.Box_Nullable; break; case CorInfoHelpFunc.CORINFO_HELP_UNBOX: id = 
ReadyToRunHelper.Unbox; break; case CorInfoHelpFunc.CORINFO_HELP_UNBOX_NULLABLE: id = ReadyToRunHelper.Unbox_Nullable; break; case CorInfoHelpFunc.CORINFO_HELP_NEW_MDARR: id = ReadyToRunHelper.NewMultiDimArr; break; case CorInfoHelpFunc.CORINFO_HELP_NEWFAST: id = ReadyToRunHelper.NewObject; break; case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST: return _compilation.NodeFactory.ExternSymbol("RhpNewFast"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_FINALIZE: return _compilation.NodeFactory.ExternSymbol("RhpNewFinalizable"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8: return _compilation.NodeFactory.ExternSymbol("RhpNewFastAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_FINALIZE: return _compilation.NodeFactory.ExternSymbol("RhpNewFinalizableAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_VC: return _compilation.NodeFactory.ExternSymbol("RhpNewFastMisalign"); case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_DIRECT: id = ReadyToRunHelper.NewArray; break; case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_ALIGN8: return _compilation.NodeFactory.ExternSymbol("RhpNewArrayAlign8"); case CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_VC: return _compilation.NodeFactory.ExternSymbol("RhpNewArray"); case CorInfoHelpFunc.CORINFO_HELP_STACK_PROBE: return _compilation.NodeFactory.ExternSymbol("RhpStackProbe"); case CorInfoHelpFunc.CORINFO_HELP_POLL_GC: return _compilation.NodeFactory.ExternSymbol("RhpGcPoll"); case CorInfoHelpFunc.CORINFO_HELP_LMUL: id = ReadyToRunHelper.LMul; break; case CorInfoHelpFunc.CORINFO_HELP_LMUL_OVF: id = ReadyToRunHelper.LMulOfv; break; case CorInfoHelpFunc.CORINFO_HELP_ULMUL_OVF: id = ReadyToRunHelper.ULMulOvf; break; case CorInfoHelpFunc.CORINFO_HELP_LDIV: id = ReadyToRunHelper.LDiv; break; case CorInfoHelpFunc.CORINFO_HELP_LMOD: id = ReadyToRunHelper.LMod; break; case CorInfoHelpFunc.CORINFO_HELP_ULDIV: id = ReadyToRunHelper.ULDiv; break; case CorInfoHelpFunc.CORINFO_HELP_ULMOD: id = ReadyToRunHelper.ULMod; break; case CorInfoHelpFunc.CORINFO_HELP_LLSH: id = ReadyToRunHelper.LLsh; break; case CorInfoHelpFunc.CORINFO_HELP_LRSH: id = ReadyToRunHelper.LRsh; break; case CorInfoHelpFunc.CORINFO_HELP_LRSZ: id = ReadyToRunHelper.LRsz; break; case CorInfoHelpFunc.CORINFO_HELP_LNG2DBL: id = ReadyToRunHelper.Lng2Dbl; break; case CorInfoHelpFunc.CORINFO_HELP_ULNG2DBL: id = ReadyToRunHelper.ULng2Dbl; break; case CorInfoHelpFunc.CORINFO_HELP_DIV: id = ReadyToRunHelper.Div; break; case CorInfoHelpFunc.CORINFO_HELP_MOD: id = ReadyToRunHelper.Mod; break; case CorInfoHelpFunc.CORINFO_HELP_UDIV: id = ReadyToRunHelper.UDiv; break; case CorInfoHelpFunc.CORINFO_HELP_UMOD: id = ReadyToRunHelper.UMod; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2INT: id = ReadyToRunHelper.Dbl2Int; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2INT_OVF: id = ReadyToRunHelper.Dbl2IntOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2LNG: id = ReadyToRunHelper.Dbl2Lng; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2LNG_OVF: id = ReadyToRunHelper.Dbl2LngOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2UINT: id = ReadyToRunHelper.Dbl2UInt; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2UINT_OVF: id = ReadyToRunHelper.Dbl2UIntOvf; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2ULNG: id = ReadyToRunHelper.Dbl2ULng; break; case CorInfoHelpFunc.CORINFO_HELP_DBL2ULNG_OVF: id = ReadyToRunHelper.Dbl2ULngOvf; break; case CorInfoHelpFunc.CORINFO_HELP_FLTREM: id = ReadyToRunHelper.FltRem; break; case CorInfoHelpFunc.CORINFO_HELP_DBLREM: id = ReadyToRunHelper.DblRem; break; case CorInfoHelpFunc.CORINFO_HELP_FLTROUND: id = 
ReadyToRunHelper.FltRound; break; case CorInfoHelpFunc.CORINFO_HELP_DBLROUND: id = ReadyToRunHelper.DblRound; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_PINVOKE_BEGIN: id = ReadyToRunHelper.PInvokeBegin; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_PINVOKE_END: id = ReadyToRunHelper.PInvokeEnd; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER: id = ReadyToRunHelper.ReversePInvokeEnter; break; case CorInfoHelpFunc.CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT: id = ReadyToRunHelper.ReversePInvokeExit; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTANY: id = ReadyToRunHelper.CheckCastAny; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY: id = ReadyToRunHelper.CheckInstanceAny; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS: case CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS_SPECIAL: // TODO: separate helper for the _SPECIAL case id = ReadyToRunHelper.CheckCastClass; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS: id = ReadyToRunHelper.CheckInstanceClass; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTARRAY: id = ReadyToRunHelper.CheckCastArray; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY: id = ReadyToRunHelper.CheckInstanceArray; break; case CorInfoHelpFunc.CORINFO_HELP_CHKCASTINTERFACE: id = ReadyToRunHelper.CheckCastInterface; break; case CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE: id = ReadyToRunHelper.CheckInstanceInterface; break; case CorInfoHelpFunc.CORINFO_HELP_MON_ENTER: id = ReadyToRunHelper.MonitorEnter; break; case CorInfoHelpFunc.CORINFO_HELP_MON_EXIT: id = ReadyToRunHelper.MonitorExit; break; case CorInfoHelpFunc.CORINFO_HELP_MON_ENTER_STATIC: id = ReadyToRunHelper.MonitorEnterStatic; break; case CorInfoHelpFunc.CORINFO_HELP_MON_EXIT_STATIC: id = ReadyToRunHelper.MonitorExitStatic; break; case CorInfoHelpFunc.CORINFO_HELP_GVMLOOKUP_FOR_SLOT: id = ReadyToRunHelper.GVMLookupForSlot; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL: id = ReadyToRunHelper.TypeHandleToRuntimeType; break; case CorInfoHelpFunc.CORINFO_HELP_GETREFANY: id = ReadyToRunHelper.GetRefAny; break; case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL: id = ReadyToRunHelper.TypeHandleToRuntimeTypeHandle; break; case CorInfoHelpFunc.CORINFO_HELP_GETCURRENTMANAGEDTHREADID: id = ReadyToRunHelper.GetCurrentManagedThreadId; break; case CorInfoHelpFunc.CORINFO_HELP_VALIDATE_INDIRECT_CALL: return _compilation.NodeFactory.ExternIndirectSymbol("__guard_check_icall_fptr"); case CorInfoHelpFunc.CORINFO_HELP_DISPATCH_INDIRECT_CALL: return _compilation.NodeFactory.ExternIndirectSymbol("__guard_dispatch_icall_fptr"); default: throw new NotImplementedException(ftnNum.ToString()); } string mangledName; MethodDesc methodDesc; JitHelper.GetEntryPoint(_compilation.TypeSystemContext, id, out mangledName, out methodDesc); Debug.Assert(mangledName != null || methodDesc != null); ISymbolNode entryPoint; if (mangledName != null) entryPoint = _compilation.NodeFactory.ExternSymbol(mangledName); else entryPoint = _compilation.NodeFactory.MethodEntrypoint(methodDesc); return entryPoint; } private void getFunctionEntryPoint(CORINFO_METHOD_STRUCT_* ftn, ref CORINFO_CONST_LOOKUP pResult, CORINFO_ACCESS_FLAGS accessFlags) { MethodDesc method = HandleToObject(ftn); // TODO: Implement MapMethodDeclToMethodImpl from CoreCLR if (method.IsVirtual && method.OwningType is MetadataType mdType && mdType.VirtualMethodImplsForType.Length > 0) { throw new NotImplementedException("getFunctionEntryPoint"); } pResult = 
CreateConstLookupToSymbol(_compilation.NodeFactory.MethodEntrypoint(method)); } private bool canTailCall(CORINFO_METHOD_STRUCT_* callerHnd, CORINFO_METHOD_STRUCT_* declaredCalleeHnd, CORINFO_METHOD_STRUCT_* exactCalleeHnd, bool fIsTailPrefix) { // Assume we can tail call unless proved otherwise bool result = true; if (!fIsTailPrefix) { MethodDesc caller = HandleToObject(callerHnd); if (caller.IsNoInlining) { // Do not tailcall from methods that are marked as noinline (people often use no-inline // to mean "I want to always see this method in stacktrace") result = false; } } return result; } private InfoAccessType constructStringLiteral(CORINFO_MODULE_STRUCT_* module, mdToken metaTok, ref void* ppValue) { MethodIL methodIL = (MethodIL)HandleToObject((IntPtr)module); ISymbolNode stringObject; if (metaTok == (mdToken)CorConstants.CorTokenType.mdtString) { stringObject = _compilation.NodeFactory.SerializedStringObject(""); } else { object literal = methodIL.GetObject((int)metaTok); stringObject = _compilation.NodeFactory.SerializedStringObject((string)literal); } ppValue = (void*)ObjectToHandle(stringObject); return stringObject.RepresentsIndirectionCell ? InfoAccessType.IAT_PVALUE : InfoAccessType.IAT_VALUE; } enum RhEHClauseKind { RH_EH_CLAUSE_TYPED = 0, RH_EH_CLAUSE_FAULT = 1, RH_EH_CLAUSE_FILTER = 2 } private ObjectNode.ObjectData EncodeEHInfo() { var builder = new ObjectDataBuilder(); builder.RequireInitialAlignment(1); int totalClauses = _ehClauses.Length; // Count the number of special markers that will be needed for (int i = 1; i < _ehClauses.Length; i++) { ref CORINFO_EH_CLAUSE clause = ref _ehClauses[i]; ref CORINFO_EH_CLAUSE previousClause = ref _ehClauses[i - 1]; if ((previousClause.TryOffset == clause.TryOffset) && (previousClause.TryLength == clause.TryLength) && ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_SAMETRY) == 0)) { totalClauses++; } } builder.EmitCompressedUInt((uint)totalClauses); for (int i = 0; i < _ehClauses.Length; i++) { ref CORINFO_EH_CLAUSE clause = ref _ehClauses[i]; if (i > 0) { ref CORINFO_EH_CLAUSE previousClause = ref _ehClauses[i - 1]; // If the previous clause has the same try offset and length as the current clause, // but belongs to a different try block (CORINFO_EH_CLAUSE_SAMETRY is not set), // emit a special marker to allow the runtime to distinguish this case. if ((previousClause.TryOffset == clause.TryOffset) && (previousClause.TryLength == clause.TryLength) && ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_SAMETRY) == 0)) { builder.EmitCompressedUInt(0); builder.EmitCompressedUInt((uint)RhEHClauseKind.RH_EH_CLAUSE_FAULT); builder.EmitCompressedUInt(0); } } RhEHClauseKind clauseKind; if (((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FAULT) != 0) || ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FINALLY) != 0)) { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_FAULT; } else if ((clause.Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FILTER) != 0) { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_FILTER; } else { clauseKind = RhEHClauseKind.RH_EH_CLAUSE_TYPED; } builder.EmitCompressedUInt((uint)clause.TryOffset); // clause.TryLength returned by the JIT is actually the end offset...
// https://github.com/dotnet/runtime/issues/5282 int tryLength = (int)clause.TryLength - (int)clause.TryOffset; builder.EmitCompressedUInt((uint)((tryLength << 2) | (int)clauseKind)); switch (clauseKind) { case RhEHClauseKind.RH_EH_CLAUSE_TYPED: { builder.EmitCompressedUInt(clause.HandlerOffset); var methodIL = (MethodIL)HandleToObject((IntPtr)_methodScope); var type = (TypeDesc)methodIL.GetObject((int)clause.ClassTokenOrOffset); // Once https://github.com/dotnet/corert/issues/3460 is done, this should be an assert. // Throwing InvalidProgram is not great, but we want to do *something* if this happens // because doing nothing means problems at runtime. This is not worth piping a // new exception with a fancy message for. if (type.IsCanonicalSubtype(CanonicalFormKind.Any)) ThrowHelper.ThrowInvalidProgramException(); var typeSymbol = _compilation.NecessaryTypeSymbolIfPossible(type); RelocType rel = (_compilation.NodeFactory.Target.IsWindows) ? RelocType.IMAGE_REL_BASED_ABSOLUTE : RelocType.IMAGE_REL_BASED_RELPTR32; if (_compilation.NodeFactory.Target.Abi == TargetAbi.Jit) rel = RelocType.IMAGE_REL_BASED_REL32; builder.EmitReloc(typeSymbol, rel); } break; case RhEHClauseKind.RH_EH_CLAUSE_FAULT: builder.EmitCompressedUInt(clause.HandlerOffset); break; case RhEHClauseKind.RH_EH_CLAUSE_FILTER: builder.EmitCompressedUInt(clause.HandlerOffset); builder.EmitCompressedUInt(clause.ClassTokenOrOffset); break; } } return builder.ToObjectData(); } private void setVars(CORINFO_METHOD_STRUCT_* ftn, uint cVars, NativeVarInfo* vars) { var methodIL = (MethodIL)HandleToObject((IntPtr)_methodScope); MethodSignature sig = methodIL.OwningMethod.Signature; int numLocals = methodIL.GetLocals().Length; ArrayBuilder<DebugVarRangeInfo>[] debugVarInfoBuilders = new ArrayBuilder<DebugVarRangeInfo>[(sig.IsStatic ? 0 : 1) + sig.Length + numLocals]; for (uint i = 0; i < cVars; i++) { uint varNumber = vars[i].varNumber; if (varNumber < debugVarInfoBuilders.Length) debugVarInfoBuilders[varNumber].Add(new DebugVarRangeInfo(vars[i].startOffset, vars[i].endOffset, vars[i].varLoc)); } var debugVarInfos = new ArrayBuilder<DebugVarInfo>(); for (uint i = 0; i < debugVarInfoBuilders.Length; i++) { if (debugVarInfoBuilders[i].Count > 0) { debugVarInfos.Add(new DebugVarInfo(i, debugVarInfoBuilders[i].ToArray())); } } _debugVarInfos = debugVarInfos.ToArray(); // The JIT gave the ownership of this to us, so we need to free it. freeArray(vars); } /// <summary> /// Create a DebugLocInfo which is a table from native offset to source line, /// using native to il offset (pMap) and il to source line (_sequencePoints). /// </summary> private void setBoundaries(CORINFO_METHOD_STRUCT_* ftn, uint cMap, OffsetMapping* pMap) { Debug.Assert(_debugLocInfos == null); int largestILOffset = 0; // All epilogs point to the largest IL offset. for (int i = 0; i < cMap; i++) { OffsetMapping nativeToILInfo = pMap[i]; int currentILOffset = (int)nativeToILInfo.ilOffset; if (currentILOffset > largestILOffset) // Special offsets are negative.
{ largestILOffset = currentILOffset; } } ArrayBuilder<DebugLocInfo> debugLocInfos = new ArrayBuilder<DebugLocInfo>(); for (int i = 0; i < cMap; i++) { OffsetMapping* nativeToILInfo = &pMap[i]; int ilOffset = (int)nativeToILInfo->ilOffset; switch (ilOffset) { case (int)MappingTypes.PROLOG: ilOffset = 0; break; case (int)MappingTypes.EPILOG: ilOffset = largestILOffset; break; case (int)MappingTypes.NO_MAPPING: continue; } debugLocInfos.Add(new DebugLocInfo((int)nativeToILInfo->nativeOffset, ilOffset)); } if (debugLocInfos.Count > 0) { _debugLocInfos = debugLocInfos.ToArray(); } freeArray(pMap); } private void SetDebugInformation(IMethodNode methodCodeNodeNeedingCode, MethodIL methodIL) { _debugInfo = _compilation.GetDebugInfo(methodIL); } private ISymbolNode GetGenericLookupHelper(CORINFO_RUNTIME_LOOKUP_KIND runtimeLookupKind, ReadyToRunHelperId helperId, object helperArgument) { if (runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_THISOBJ || runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_CLASSPARAM) { return _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup(helperId, helperArgument, MethodBeingCompiled.OwningType); } Debug.Assert(runtimeLookupKind == CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_METHODPARAM); return _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup(helperId, helperArgument, MethodBeingCompiled); } private CorInfoHelpFunc getCastingHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool fThrowing) { TypeDesc type = HandleToObject(pResolvedToken.hClass); CorInfoHelpFunc helper; if (type.IsCanonicalDefinitionType(CanonicalFormKind.Any)) { // In shared code just use the catch-all helper for type variables, as the same // code may be used for interface/array/class instantiations // // We may be able to take advantage of constraints to select a specialized helper. // This optimization does not seem to be warranted at the moment. helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; } else if (type.IsInterface) { // If it is an interface, use the fast interface helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE; } else if (type.IsArray) { // If it is an array, use the fast array helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY; } else if (type.IsDefType) { helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS; #if !SUPPORT_JIT // When this assert is hit, we'll have to do something with the class checks in RyuJIT // Frozen strings might end up failing inlined checks generated by RyuJIT for sealed classes.
Debug.Assert(!_compilation.NodeFactory.CompilationModuleGroup.CanHaveReferenceThroughImportTable); #endif } else { // Otherwise, use the slow helper helper = CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; } if (fThrowing) { int delta = CorInfoHelpFunc.CORINFO_HELP_CHKCASTANY - CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFANY; Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFINTERFACE + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTINTERFACE); Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFARRAY + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTARRAY); Debug.Assert(CorInfoHelpFunc.CORINFO_HELP_ISINSTANCEOFCLASS + delta == CorInfoHelpFunc.CORINFO_HELP_CHKCASTCLASS); helper += delta; } return helper; } private CorInfoHelpFunc getNewHelper(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, ref bool pHasSideEffects) { TypeDesc type = HandleToObject(pResolvedToken.hClass); Debug.Assert(!type.IsString && !type.IsArray && !type.IsCanonicalDefinitionType(CanonicalFormKind.Any)); pHasSideEffects = type.HasFinalizer; if (type.RequiresAlign8()) { if (type.HasFinalizer) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_FINALIZE; if (type.IsValueType) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8_VC; return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_ALIGN8; } if (type.HasFinalizer) return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST_FINALIZE; return CorInfoHelpFunc.CORINFO_HELP_NEWSFAST; } private CorInfoHelpFunc getNewArrHelper(CORINFO_CLASS_STRUCT_* arrayCls) { TypeDesc type = HandleToObject(arrayCls); Debug.Assert(type.IsArray); if (type.RequiresAlign8()) return CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_ALIGN8; return CorInfoHelpFunc.CORINFO_HELP_NEWARR_1_VC; } private IMethodNode GetMethodEntrypoint(CORINFO_MODULE_STRUCT_* pScope, MethodDesc method) { bool isUnboxingThunk = method.IsUnboxingThunk(); if (isUnboxingThunk) { method = method.GetUnboxedMethod(); } if (method.HasInstantiation || method.OwningType.HasInstantiation) { MethodIL methodIL = (MethodIL)HandleToObject((IntPtr)pScope); _compilation.DetectGenericCycles(methodIL.OwningMethod, method); } return _compilation.NodeFactory.MethodEntrypoint(method, isUnboxingThunk); } private static bool IsTypeSpecForTypicalInstantiation(TypeDesc t) { Instantiation inst = t.Instantiation; for (int i = 0; i < inst.Length; i++) { var arg = inst[i] as SignatureTypeVariable; if (arg == null || arg.Index != i) return false; } return true; } private void getCallInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. MemoryHelper.FillMemory((byte*)pResult, 0xcc, Marshal.SizeOf<CORINFO_CALL_INFO>()); #endif MethodDesc method = HandleToObject(pResolvedToken.hMethod); // Spec says that a callvirt lookup ignores static methods. Since static methods // can't have the exact same signature as instance methods, a lookup that found // a static method would have never found an instance method. 
if (method.Signature.IsStatic && (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) != 0) { throw new BadImageFormatException(); } TypeDesc exactType = HandleToObject(pResolvedToken.hClass); TypeDesc constrainedType = null; if (pConstrainedResolvedToken != null) { constrainedType = HandleToObject(pConstrainedResolvedToken->hClass); } bool resolvedConstraint = false; bool forceUseRuntimeLookup = false; bool targetIsFatFunctionPointer = false; bool useFatCallTransform = false; MethodDesc methodAfterConstraintResolution = method; if (constrainedType == null) { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_NO_THIS_TRANSFORM; } else { // We have a "constrained." call. Try a partial resolve of the constraint call. Note that this // will not necessarily resolve the call exactly, since we might be compiling // shared generic code - it may just resolve it to a candidate suitable for // JIT compilation, and require a runtime lookup for the actual code pointer // to call. MethodDesc directMethod = constrainedType.GetClosestDefType().TryResolveConstraintMethodApprox(exactType, method, out forceUseRuntimeLookup); if (directMethod == null && constrainedType.IsEnum) { // Constrained calls to methods on enums resolve to System.Enum's methods. System.Enum is a reference // type though, so we would fail to resolve and box. We have a special path for those to avoid boxing. directMethod = _compilation.TypeSystemContext.TryResolveConstrainedEnumMethod(constrainedType, method); } if (directMethod != null) { // Either // 1. no constraint resolution at compile time (!directMethod) // OR 2. no code sharing lookup in call // OR 3. we have resolved to an instantiating stub methodAfterConstraintResolution = directMethod; Debug.Assert(!methodAfterConstraintResolution.OwningType.IsInterface); resolvedConstraint = true; pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_NO_THIS_TRANSFORM; exactType = constrainedType; } else if (method.Signature.IsStatic) { Debug.Assert(method.OwningType.IsInterface); exactType = constrainedType; } else if (constrainedType.IsValueType) { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_BOX_THIS; } else { pResult->thisTransform = CORINFO_THIS_TRANSFORM.CORINFO_DEREF_THIS; } } MethodDesc targetMethod = methodAfterConstraintResolution; // // Initialize callee context used for inlining and instantiation arguments // if (targetMethod.HasInstantiation) { pResult->contextHandle = contextFromMethod(targetMethod); pResult->exactContextNeedsRuntimeLookup = targetMethod.IsSharedByGenericInstantiations; } else { pResult->contextHandle = contextFromType(exactType); pResult->exactContextNeedsRuntimeLookup = exactType.IsCanonicalSubtype(CanonicalFormKind.Any); // Use main method as the context as long as the methods are called on the same type if (pResult->exactContextNeedsRuntimeLookup && pResolvedToken.tokenContext == contextFromMethodBeingCompiled() && constrainedType == null && exactType == MethodBeingCompiled.OwningType && // But don't allow inlining into generic methods since the generic context won't be the same. // The scanner won't be able to predict such inlining.
See https://github.com/dotnet/runtimelab/pull/489 !MethodBeingCompiled.HasInstantiation) { var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); var rawMethod = (MethodDesc)methodIL.GetMethodILDefinition().GetObject((int)pResolvedToken.token); if (IsTypeSpecForTypicalInstantiation(rawMethod.OwningType)) { pResult->contextHandle = contextFromMethodBeingCompiled(); } } } // // Determine whether to perform direct call // bool directCall = false; bool resolvedCallVirt = false; if (targetMethod.Signature.IsStatic) { if (constrainedType != null && (!resolvedConstraint || forceUseRuntimeLookup)) { // Constrained call to static virtual interface method we didn't resolve statically Debug.Assert(targetMethod.IsVirtual && targetMethod.OwningType.IsInterface); } else { // Static methods are always direct calls directCall = true; } } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) == 0 || resolvedConstraint) { directCall = true; } else { if (!targetMethod.IsVirtual || // Final/sealed has no meaning for interfaces, but lets us devirtualize otherwise !targetMethod.OwningType.IsInterface && (targetMethod.IsFinal || targetMethod.OwningType.IsSealed())) { resolvedCallVirt = true; directCall = true; } } pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = false; bool allowInstParam = (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_ALLOWINSTPARAM) != 0; if (directCall && targetMethod.IsAbstract) { ThrowHelper.ThrowBadImageFormatException(); } if (directCall && !allowInstParam && targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific).RequiresInstArg()) { // JIT needs a single address to call this method but the method needs a hidden argument. // We need a fat function pointer for this that captures both things. targetIsFatFunctionPointer = true; // JIT won't expect fat function pointers unless this is e.g. delegate creation Debug.Assert((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) != 0); pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; if (pResult->exactContextNeedsRuntimeLookup) { pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = true; pResult->codePointerOrStubLookup.lookupKind.runtimeLookupFlags = 0; pResult->codePointerOrStubLookup.runtimeLookup.indirections = CORINFO.USEHELPER; // Do not bother computing the runtime lookup if we are inlining. The JIT is going // to abort the inlining attempt anyway. if (pResolvedToken.tokenContext == contextFromMethodBeingCompiled()) { MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); pResult->codePointerOrStubLookup.lookupKind.runtimeLookupKind = GetGenericRuntimeLookupKind(contextMethod); pResult->codePointerOrStubLookup.lookupKind.runtimeLookupFlags = (ushort)ReadyToRunHelperId.MethodEntry; pResult->codePointerOrStubLookup.lookupKind.runtimeLookupArgs = (void*)ObjectToHandle(GetRuntimeDeterminedObjectForToken(ref pResolvedToken)); } else { pResult->codePointerOrStubLookup.lookupKind.runtimeLookupKind = CORINFO_RUNTIME_LOOKUP_KIND.CORINFO_LOOKUP_NOT_SUPPORTED; } } else { pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.FatFunctionPointer(targetMethod)); } } else if (directCall && resolvedConstraint && pResult->exactContextNeedsRuntimeLookup) { // We want to do a direct call to a shared method on a valuetype. We need to provide // a generic context, but the JitInterface doesn't provide a way for us to do it from here. // So we do the next best thing and ask RyuJIT to look up a fat pointer. 
pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_VALUE; pResult->nullInstanceCheck = !targetMethod.Signature.IsStatic; // We have the canonical version of the method - find the runtime determined version. // This is simplified because we know the method is on a valuetype. Debug.Assert(targetMethod.OwningType.IsValueType); TypeDesc runtimeDeterminedConstrainedType = (TypeDesc)GetRuntimeDeterminedObjectForToken(ref *pConstrainedResolvedToken); if (forceUseRuntimeLookup) { // The below logic would incorrectly resolve the lookup into the first match we found, // but there was a compile-time ambiguity due to shared code. The correct fix should // use the ConstrainedMethodUseLookupResult dictionary entry so that the exact // dispatch can be computed with the help of the generic dictionary. // We fail the compilation here to avoid bad codegen. This is not actually an invalid program. // https://github.com/dotnet/runtimelab/issues/1431 ThrowHelper.ThrowInvalidProgramException(); } MethodDesc targetOfLookup; if (runtimeDeterminedConstrainedType.IsRuntimeDeterminedType) targetOfLookup = _compilation.TypeSystemContext.GetMethodForRuntimeDeterminedType(targetMethod.GetTypicalMethodDefinition(), (RuntimeDeterminedType)runtimeDeterminedConstrainedType); else if (runtimeDeterminedConstrainedType.HasInstantiation) targetOfLookup = _compilation.TypeSystemContext.GetMethodForInstantiatedType(targetMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedConstrainedType); else targetOfLookup = targetMethod.GetMethodDefinition(); if (targetOfLookup.HasInstantiation) { var methodToGetInstantiation = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); targetOfLookup = targetOfLookup.MakeInstantiatedMethod(methodToGetInstantiation.Instantiation); } Debug.Assert(targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific) == targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific)); ComputeLookup(ref pResolvedToken, targetOfLookup, ReadyToRunHelperId.MethodEntry, ref pResult->codePointerOrStubLookup); targetIsFatFunctionPointer = true; useFatCallTransform = true; } else if (directCall) { bool referencingArrayAddressMethod = false; if (targetMethod.IsIntrinsic) { // If this is an intrinsic method with a callsite-specific expansion, this will replace // the method with a method the intrinsic expands into. If it's not the special intrinsic, // the method stays unchanged. var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); targetMethod = _compilation.ExpandIntrinsicForCallsite(targetMethod, methodIL.OwningMethod); // For multidim array Address method, we pretend the method requires a hidden instantiation argument // (even though it doesn't need one). We'll actually swap the method out for a different one with // a matching calling convention later. See ArrayMethod for a description. referencingArrayAddressMethod = targetMethod.IsArrayAddressMethod(); } pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL; TypeDesc owningType = targetMethod.OwningType; if (owningType.IsString && targetMethod.IsConstructor) { // Calling a string constructor doesn't call the actual constructor. pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.StringAllocator(targetMethod) ); } else if (owningType.IsArray && targetMethod.IsConstructor) { // Constructors on arrays are special and don't actually have entrypoints.
// That would be fine by itself and wouldn't need special casing. But // constructors on SzArray have a weird property that causes them not to have canonical forms. // int[][] has a .ctor(int32,int32) to construct the jagged array in one go, but its canonical // form of __Canon[] doesn't have the two-parameter constructor. The canonical form would need // to have an unlimited number of constructors to cover stuff like "int[][][][][][]..." pResult->codePointerOrStubLookup.constLookup = default; } else if (pResult->exactContextNeedsRuntimeLookup) { // Nothing to do... The generic handle lookup gets embedded into the codegen // during the jitting of the call. // (Note: The generic lookup in R2R is performed by a call to a helper at runtime, not by // codegen emitted at crossgen time) targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); Debug.Assert(!forceUseRuntimeLookup); pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( GetMethodEntrypoint(pResolvedToken.tokenScope, targetMethod) ); } else { MethodDesc concreteMethod = targetMethod; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); ISymbolNode instParam = null; if (targetMethod.RequiresInstMethodDescArg()) { instParam = _compilation.NodeFactory.MethodGenericDictionary(concreteMethod); } else if (targetMethod.RequiresInstMethodTableArg() || referencingArrayAddressMethod) { // Ask for a constructed type symbol because we need the vtable to get to the dictionary instParam = _compilation.NodeFactory.ConstructedTypeSymbol(concreteMethod.OwningType); } if (instParam != null) { pResult->instParamLookup = CreateConstLookupToSymbol(instParam); } pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( GetMethodEntrypoint(pResolvedToken.tokenScope, targetMethod) ); } pResult->nullInstanceCheck = resolvedCallVirt; } else if (targetMethod.Signature.IsStatic) { // This should be an unresolved static virtual interface method call. Other static methods should // have been handled as a directCall above.
Debug.Assert(targetMethod.OwningType.IsInterface && targetMethod.IsVirtual && constrainedType != null); pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; TypeDesc runtimeDeterminedConstrainedType = (TypeDesc)GetRuntimeDeterminedObjectForToken(ref *pConstrainedResolvedToken); MethodDesc runtimeDeterminedInterfaceMethod = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); ComputeLookup(ref pResolvedToken, new ConstrainedCallInfo(runtimeDeterminedConstrainedType, runtimeDeterminedInterfaceMethod), ReadyToRunHelperId.ConstrainedDirectCall, ref pResult->codePointerOrStubLookup); targetIsFatFunctionPointer = true; useFatCallTransform = true; pResult->nullInstanceCheck = false; } else if (targetMethod.HasInstantiation) { // Generic virtual method call support pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_LDVIRTFTN; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_VALUE; pResult->nullInstanceCheck = true; MethodDesc targetOfLookup = _compilation.GetTargetOfGenericVirtualMethodCall((MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken)); _compilation.DetectGenericCycles( ((MethodILScope)HandleToObject((IntPtr)pResolvedToken.tokenScope)).OwningMethod, targetOfLookup.GetCanonMethodTarget(CanonicalFormKind.Specific)); ComputeLookup(ref pResolvedToken, targetOfLookup, ReadyToRunHelperId.MethodHandle, ref pResult->codePointerOrStubLookup); // RyuJIT will assert if we report CORINFO_CALLCONV_PARAMTYPE for a result of a ldvirtftn // We don't need an instantiation parameter, so let's just not report it. Might be nice to // move that assert to some place later though. targetIsFatFunctionPointer = true; } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) == 0 && targetMethod.OwningType.IsInterface) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_STUB; if (pResult->exactContextNeedsRuntimeLookup) { ComputeLookup(ref pResolvedToken, GetRuntimeDeterminedObjectForToken(ref pResolvedToken), ReadyToRunHelperId.VirtualDispatchCell, ref pResult->codePointerOrStubLookup); Debug.Assert(pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup); } else { pResult->codePointerOrStubLookup.lookupKind.needsRuntimeLookup = false; pResult->codePointerOrStubLookup.constLookup.accessType = InfoAccessType.IAT_PVALUE; pResult->codePointerOrStubLookup.constLookup.addr = (void*)ObjectToHandle( _compilation.NodeFactory.InterfaceDispatchCell(targetMethod #if !SUPPORT_JIT , _compilation.NameMangler.GetMangledMethodName(MethodBeingCompiled).ToString() #endif )); } pResult->nullInstanceCheck = false; } else if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) == 0 // Canonically-equivalent types have the same vtable layout. Check the canonical form. // We don't want to accidentally ask about Foo<object, __Canon> that may or may not // be available to ask vtable questions about. // This can happen in inlining that the scanner didn't expect. && _compilation.HasFixedSlotVTable(targetMethod.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific))) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_VTABLE; pResult->nullInstanceCheck = true; } else { ReadyToRunHelperId helperId; if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN) != 0) { pResult->kind = CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_LDVIRTFTN; helperId = ReadyToRunHelperId.ResolveVirtualFunction; } else { // CORINFO_CALL_CODE_POINTER tells the JIT that this is an indirect // call that should not be inlined.
pResult->kind = CORINFO_CALL_KIND.CORINFO_CALL_CODE_POINTER; helperId = ReadyToRunHelperId.VirtualCall; } // If this is a non-interface call, we actually don't need a runtime lookup to find the target. // We don't even need to keep track of the runtime-determined method being called because the system ensures // that if e.g. Foo<__Canon>.GetHashCode is needed and we're generating a dictionary for Foo<string>, // Foo<string>.GetHashCode is needed too. if (pResult->exactContextNeedsRuntimeLookup && targetMethod.OwningType.IsInterface) { // We need JitInterface changes to fully support this. // If this is LDVIRTFTN of an interface method that is part of a verifiable delegate creation sequence, // RyuJIT is not going to use this value. Debug.Assert(helperId == ReadyToRunHelperId.ResolveVirtualFunction); pResult->exactContextNeedsRuntimeLookup = false; pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ExternSymbol("NYI_LDVIRTFTN")); } else { pResult->exactContextNeedsRuntimeLookup = false; targetMethod = targetMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); // Get the slot defining method to make sure our virtual method use tracking gets this right. // For normal C# code the targetMethod will always be newslot. MethodDesc slotDefiningMethod = targetMethod.IsNewSlot ? targetMethod : MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethod); pResult->codePointerOrStubLookup.constLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.ReadyToRunHelper(helperId, slotDefiningMethod)); } // The current CoreRT ReadyToRun helpers do not handle null thisptr - ask the JIT to emit explicit null checks // TODO: Optimize this pResult->nullInstanceCheck = true; } pResult->hMethod = ObjectToHandle(targetMethod); pResult->accessAllowed = CorInfoIsAccessAllowedResult.CORINFO_ACCESS_ALLOWED; // We're pretty much done at this point. Let's grab the rest of the information that the jit is going to // need. 
pResult->classFlags = getClassAttribsInternal(targetMethod.OwningType); pResult->methodFlags = getMethodAttribsInternal(targetMethod); targetIsFatFunctionPointer |= (flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT) != 0 && !(pResult->kind == CORINFO_CALL_KIND.CORINFO_CALL); Get_CORINFO_SIG_INFO(targetMethod, &pResult->sig, scope: null, targetIsFatFunctionPointer); if (useFatCallTransform) { pResult->sig.flags |= CorInfoSigInfoFlags.CORINFO_SIGFLAG_FAT_CALL; } if ((flags & CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_VERIFICATION) != 0) { if (pResult->hMethod != pResolvedToken.hMethod) { pResult->verMethodFlags = getMethodAttribsInternal(targetMethod); Get_CORINFO_SIG_INFO(targetMethod, &pResult->verSig, scope: null); } else { pResult->verMethodFlags = pResult->methodFlags; pResult->verSig = pResult->sig; } } pResult->_wrapperDelegateInvoke = 0; } private CORINFO_CLASS_STRUCT_* embedClassHandle(CORINFO_CLASS_STRUCT_* handle, ref void* ppIndirection) { TypeDesc type = HandleToObject(handle); ISymbolNode typeHandleSymbol = _compilation.NecessaryTypeSymbolIfPossible(type); CORINFO_CLASS_STRUCT_* result = (CORINFO_CLASS_STRUCT_*)ObjectToHandle(typeHandleSymbol); if (typeHandleSymbol.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private void embedGenericHandle(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool fEmbedParent, ref CORINFO_GENERICHANDLE_RESULT pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. fixed (CORINFO_GENERICHANDLE_RESULT* tmp = &pResult) MemoryHelper.FillMemory((byte*)tmp, 0xcc, Marshal.SizeOf<CORINFO_GENERICHANDLE_RESULT>()); #endif ReadyToRunHelperId helperId = ReadyToRunHelperId.Invalid; object target = null; if (!fEmbedParent && pResolvedToken.hMethod != null) { MethodDesc md = HandleToObject(pResolvedToken.hMethod); TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_METHOD; Debug.Assert(md.OwningType == td); pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)ObjectToHandle(md); if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken) helperId = ReadyToRunHelperId.MethodHandle; else { Debug.Assert(pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Method); helperId = ReadyToRunHelperId.MethodDictionary; } target = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); } else if (!fEmbedParent && pResolvedToken.hField != null) { FieldDesc fd = HandleToObject(pResolvedToken.hField); TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_FIELD; pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)pResolvedToken.hField; Debug.Assert(pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken); helperId = ReadyToRunHelperId.FieldHandle; target = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); } else { TypeDesc td = HandleToObject(pResolvedToken.hClass); pResult.handleType = CorInfoGenericHandleType.CORINFO_HANDLETYPE_CLASS; pResult.compileTimeHandle = (CORINFO_GENERIC_STRUCT_*)pResolvedToken.hClass; object obj = GetRuntimeDeterminedObjectForToken(ref pResolvedToken); target = obj as TypeDesc; if (target == null) { Debug.Assert(fEmbedParent); if (obj is MethodDesc objAsMethod) { target = objAsMethod.OwningType; } else { Debug.Assert(obj is FieldDesc); target = ((FieldDesc)obj).OwningType; } } if (pResolvedToken.tokenType == 
CorInfoTokenKind.CORINFO_TOKENKIND_NewObj || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Newarr || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Box || pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Constrained) { helperId = ReadyToRunHelperId.TypeHandle; } else if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Casting) { helperId = ReadyToRunHelperId.TypeHandleForCasting; } else if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Ldtoken) { helperId = _compilation.GetLdTokenHelperForType(td); } else { helperId = ReadyToRunHelperId.NecessaryTypeHandle; } } Debug.Assert(pResult.compileTimeHandle != null); ComputeLookup(ref pResolvedToken, target, helperId, ref pResult.lookup); } private CORINFO_METHOD_STRUCT_* embedMethodHandle(CORINFO_METHOD_STRUCT_* handle, ref void* ppIndirection) { MethodDesc method = HandleToObject(handle); ISymbolNode methodHandleSymbol = _compilation.NodeFactory.RuntimeMethodHandle(method); CORINFO_METHOD_STRUCT_* result = (CORINFO_METHOD_STRUCT_*)ObjectToHandle(methodHandleSymbol); if (methodHandleSymbol.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private void getMethodVTableOffset(CORINFO_METHOD_STRUCT_* method, ref uint offsetOfIndirection, ref uint offsetAfterIndirection, ref bool isRelative) { MethodDesc methodDesc = HandleToObject(method); int pointerSize = _compilation.TypeSystemContext.Target.PointerSize; offsetOfIndirection = (uint)CORINFO_VIRTUALCALL_NO_CHUNK.Value; isRelative = false; // Normalize to the slot defining method. We don't have slot information for the overrides. methodDesc = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(methodDesc); Debug.Assert(!methodDesc.CanMethodBeInSealedVTable()); // Avoid asking about slots on types like Foo<object, __Canon>. We might not have that information. // Canonically-equivalent types have the same slots, so ask for Foo<__Canon, __Canon>. methodDesc = methodDesc.GetCanonMethodTarget(CanonicalFormKind.Specific); int slot = VirtualMethodSlotHelper.GetVirtualMethodSlot(_compilation.NodeFactory, methodDesc, methodDesc.OwningType); if (slot == -1) { throw new InvalidOperationException(methodDesc.ToString()); } offsetAfterIndirection = (uint)(EETypeNode.GetVTableOffset(pointerSize) + slot * pointerSize); } private void expandRawHandleIntrinsic(ref CORINFO_RESOLVED_TOKEN pResolvedToken, ref CORINFO_GENERICHANDLE_RESULT pResult) { // Resolved token as a potentially RuntimeDetermined object. 
MethodDesc method = (MethodDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); switch (method.Name) { case "EETypePtrOf": case "MethodTableOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.TypeHandle, ref pResult.lookup); break; case "DefaultConstructorOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.DefaultConstructor, ref pResult.lookup); break; case "AllocatorOf": ComputeLookup(ref pResolvedToken, method.Instantiation[0], ReadyToRunHelperId.ObjectAllocator, ref pResult.lookup); break; } } private uint getMethodAttribs(CORINFO_METHOD_STRUCT_* ftn) { return getMethodAttribsInternal(HandleToObject(ftn)); } private void* getMethodSync(CORINFO_METHOD_STRUCT_* ftn, ref void* ppIndirection) { MethodDesc method = HandleToObject(ftn); TypeDesc type = method.OwningType; ISymbolNode methodSync = _compilation.NecessaryTypeSymbolIfPossible(type); void* result = (void*)ObjectToHandle(methodSync); if (methodSync.RepresentsIndirectionCell) { ppIndirection = result; return null; } else { ppIndirection = null; return result; } } private unsafe HRESULT allocPgoInstrumentationBySchema(CORINFO_METHOD_STRUCT_* ftnHnd, PgoInstrumentationSchema* pSchema, uint countSchemaItems, byte** pInstrumentationData) { throw new NotImplementedException("allocPgoInstrumentationBySchema"); } private CORINFO_CLASS_STRUCT_* getLikelyClass(CORINFO_METHOD_STRUCT_* ftnHnd, CORINFO_CLASS_STRUCT_* baseHnd, uint IlOffset, ref uint pLikelihood, ref uint pNumberOfClasses) { return null; } private void getAddressOfPInvokeTarget(CORINFO_METHOD_STRUCT_* method, ref CORINFO_CONST_LOOKUP pLookup) { MethodDesc md = HandleToObject(method); string externName = _compilation.PInvokeILProvider.GetDirectCallExternName(md); pLookup = CreateConstLookupToSymbol(_compilation.NodeFactory.ExternSymbol(externName)); } private void getGSCookie(IntPtr* pCookieVal, IntPtr** ppCookieVal) { // TODO: fully implement GS cookies if (pCookieVal != null) { if (PointerSize == 4) { *pCookieVal = (IntPtr)0x3F796857; } else { *pCookieVal = (IntPtr)0x216D6F6D202C6948; } *ppCookieVal = null; } else { throw new NotImplementedException("getGSCookie"); } } private bool pInvokeMarshalingRequired(CORINFO_METHOD_STRUCT_* handle, CORINFO_SIG_INFO* callSiteSig) { // calli is covered by convertPInvokeCalliToCall if (handle == null) { #if DEBUG MethodSignature methodSignature = (MethodSignature)HandleToObject((IntPtr)callSiteSig->pSig); MethodDesc stub = _compilation.PInvokeILProvider.GetCalliStub( methodSignature, ((MetadataType)HandleToObject(callSiteSig->scope).OwningMethod.OwningType).Module); Debug.Assert(!IsPInvokeStubRequired(stub)); #endif return false; } MethodDesc method = HandleToObject(handle); if (method.IsRawPInvoke()) return false; // Stub is required to trigger precise static constructor TypeDesc owningType = method.OwningType; if (_compilation.HasLazyStaticConstructor(owningType) && !((MetadataType)owningType).IsBeforeFieldInit) return true; // We could have given back the PInvoke stub IL to the JIT and let it inline it, without // checking whether there is any stub required. Save the JIT from doing the inlining by checking upfront. 
return IsPInvokeStubRequired(method); } private bool convertPInvokeCalliToCall(ref CORINFO_RESOLVED_TOKEN pResolvedToken, bool mustConvert) { var methodIL = (MethodIL)HandleToObject((IntPtr)pResolvedToken.tokenScope); // Suppress recursive expansion of calli in marshaling stubs if (methodIL is Internal.IL.Stubs.PInvokeILStubMethodIL) return false; MethodSignature signature = (MethodSignature)methodIL.GetObject((int)pResolvedToken.token); if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) == 0) return false; MethodDesc stub = _compilation.PInvokeILProvider.GetCalliStub( signature, ((MetadataType)methodIL.OwningMethod.OwningType).Module); if (!mustConvert && !IsPInvokeStubRequired(stub)) return false; pResolvedToken.hMethod = ObjectToHandle(stub); pResolvedToken.hClass = ObjectToHandle(stub.OwningType); return true; } private bool IsPInvokeStubRequired(MethodDesc method) { if (_compilation.GetMethodIL(method) is Internal.IL.Stubs.PInvokeILStubMethodIL stub) return stub.IsStubRequired; // This path is taken for PInvokes replaced by RemovingILProvider return true; } private int SizeOfPInvokeTransitionFrame { get { // struct PInvokeTransitionFrame: // #ifdef _TARGET_ARM_ // m_ChainPointer // #endif // m_RIP // m_FramePointer // m_pThread // m_Flags + align (no align for ARM64 that has 64 bit m_Flags) // m_PreservedRegs - RSP // No need to save other preserved regs because the JIT ensures that there are // no live GC references in callee saved registers around the PInvoke callsite. int size = 5 * this.PointerSize; if (_compilation.TypeSystemContext.Target.Architecture == TargetArchitecture.ARM) size += this.PointerSize; // m_ChainPointer return size; } } private bool canGetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig) { throw new NotImplementedException("canGetCookieForPInvokeCalliSig"); } private void classMustBeLoadedBeforeCodeIsRun(CORINFO_CLASS_STRUCT_* cls) { } private void setEHcount(uint cEH) { _ehClauses = new CORINFO_EH_CLAUSE[cEH]; } private void setEHinfo(uint EHnumber, ref CORINFO_EH_CLAUSE clause) { _ehClauses[EHnumber] = clause; } private void reportInliningDecision(CORINFO_METHOD_STRUCT_* inlinerHnd, CORINFO_METHOD_STRUCT_* inlineeHnd, CorInfoInline inlineResult, byte* reason) { } private void updateEntryPointForTailCall(ref CORINFO_CONST_LOOKUP entryPoint) { } private int* getAddrOfCaptureThreadGlobal(ref void* ppIndirection) { ppIndirection = null; return (int*)ObjectToHandle(_compilation.NodeFactory.ExternSymbol("RhpTrapThreads")); } private void getFieldInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_METHOD_STRUCT_* callerHandle, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult) { #if DEBUG // In debug, write some bogus data to the struct to ensure we have filled everything // properly. MemoryHelper.FillMemory((byte*)pResult, 0xcc, Marshal.SizeOf<CORINFO_FIELD_INFO>()); #endif Debug.Assert(((int)flags & ((int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_GET | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_SET | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_ADDRESS | (int)CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_INIT_ARRAY)) != 0); var field = HandleToObject(pResolvedToken.hField); CORINFO_FIELD_ACCESSOR fieldAccessor; CORINFO_FIELD_FLAGS fieldFlags = (CORINFO_FIELD_FLAGS)0; uint fieldOffset = (field.IsStatic && field.HasRva ?
0xBAADF00D : (uint)field.Offset.AsInt); if (field.IsStatic) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_STATIC; if (field.HasRva) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_UNMANAGED; // TODO: Handle the case when the RVA is in the TLS range fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_RVA_ADDRESS; // We are not going through a helper. The constructor has to be triggered explicitly. if (_compilation.HasLazyStaticConstructor(field.OwningType)) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_INITCLASS; } } else if (field.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // The JIT wants to know how to access a static field on a generic type. We need a runtime lookup. fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_READYTORUN_HELPER; pResult->helper = CorInfoHelpFunc.CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE; // Don't try to compute the runtime lookup if we're inlining. The JIT is going to abort the inlining // attempt anyway. if (pResolvedToken.tokenContext == contextFromMethodBeingCompiled()) { MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext); FieldDesc runtimeDeterminedField = (FieldDesc)GetRuntimeDeterminedObjectForToken(ref pResolvedToken); ReadyToRunHelperId helperId; // Find out what kind of base we need to look up. if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else if (field.HasGCStaticBase) { helperId = ReadyToRunHelperId.GetGCStaticBase; } else { helperId = ReadyToRunHelperId.GetNonGCStaticBase; } // What generic context do we look up the base from? ISymbolNode helper; if (contextMethod.AcquiresInstMethodTableFromThis() || contextMethod.RequiresInstMethodTableArg()) { helper = _compilation.NodeFactory.ReadyToRunHelperFromTypeLookup( helperId, runtimeDeterminedField.OwningType, contextMethod.OwningType); } else { Debug.Assert(contextMethod.RequiresInstMethodDescArg()); helper = _compilation.NodeFactory.ReadyToRunHelperFromDictionaryLookup( helperId, runtimeDeterminedField.OwningType, contextMethod); } pResult->fieldLookup = CreateConstLookupToSymbol(helper); } } else { fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER; pResult->helper = CorInfoHelpFunc.CORINFO_HELP_READYTORUN_STATIC_BASE; ReadyToRunHelperId helperId = ReadyToRunHelperId.Invalid; CORINFO_FIELD_ACCESSOR intrinsicAccessor; if (field.IsIntrinsic && (flags & CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_GET) != 0 && (intrinsicAccessor = getFieldIntrinsic(field)) != (CORINFO_FIELD_ACCESSOR)(-1)) { fieldAccessor = intrinsicAccessor; } else if (field.IsThreadStatic) { helperId = ReadyToRunHelperId.GetThreadStaticBase; } else { helperId = field.HasGCStaticBase ? ReadyToRunHelperId.GetGCStaticBase : ReadyToRunHelperId.GetNonGCStaticBase; // // Currently, we only do this optimization for regular statics, but it // looks like it may be permissible to do this optimization for // thread statics as well.
// if ((flags & CORINFO_ACCESS_FLAGS.CORINFO_ACCESS_ADDRESS) != 0 && (fieldAccessor != CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_STATIC_TLS)) { fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN; } } if (helperId != ReadyToRunHelperId.Invalid) { pResult->fieldLookup = CreateConstLookupToSymbol( _compilation.NodeFactory.ReadyToRunHelper(helperId, field.OwningType)); } } } else { fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_INSTANCE; } if (field.IsInitOnly) fieldFlags |= CORINFO_FIELD_FLAGS.CORINFO_FLG_FIELD_FINAL; pResult->fieldAccessor = fieldAccessor; pResult->fieldFlags = fieldFlags; pResult->fieldType = getFieldType(pResolvedToken.hField, &pResult->structType, pResolvedToken.hClass); pResult->accessAllowed = CorInfoIsAccessAllowedResult.CORINFO_ACCESS_ALLOWED; pResult->offset = fieldOffset; // TODO: We need to implement access checks for fields and methods. See JitInterface.cpp in mrtjit // and STS::AccessCheck::CanAccess. } } }
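A note for readers of EncodeEHInfo above: each clause header packs the try-region length together with the 2-bit clause kind into one compressed uint, as (tryLength << 2) | clauseKind. The following is a minimal, self-contained sketch of that bit layout only; the type and member names are illustrative, not the compiler's own:

using System;

// Mirrors the 2-bit clause-kind values emitted by the encoder above.
enum EhClauseKind { Typed = 0, Fault = 1, Filter = 2 }

static class EhClausePackingDemo
{
    // Pack a try-region length and a clause kind the way EncodeEHInfo does
    // before emitting the value as a compressed uint.
    static uint Pack(int tryLength, EhClauseKind kind)
        => (uint)((tryLength << 2) | (int)kind);

    // The decode direction: the low 2 bits are the kind, the rest is the length.
    static (int TryLength, EhClauseKind Kind) Unpack(uint packed)
        => ((int)(packed >> 2), (EhClauseKind)(packed & 0x3));

    static void Main()
    {
        uint packed = Pack(tryLength: 24, kind: EhClauseKind.Filter);
        (int len, EhClauseKind kind) = Unpack(packed);
        Console.WriteLine($"0x{packed:X} -> length={len}, kind={kind}"); // 0x62 -> length=24, kind=Filter
    }
}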
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
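For context, this is the language-level shape the PR enables: a "static abstract" interface member and a "constrained." call to it from generic code (a preview feature at the time of this PR). A minimal sketch with illustrative type names, not code from the PR itself:

using System;

// A static virtual (static abstract) interface member.
interface ISimpleParseable<TSelf> where TSelf : ISimpleParseable<TSelf>
{
    static abstract TSelf Parse(string s);
}

struct Meters : ISimpleParseable<Meters>
{
    public double Value;
    public static Meters Parse(string s) => new Meters { Value = double.Parse(s) };
}

static class Demo
{
    // T.Parse compiles to a "constrained." call on a static virtual method.
    // ResolveConstraintMethodApprox either resolves it exactly at compile time
    // or, in shared generic code, leaves it to a generic dictionary lookup as
    // described above.
    static T ParseAny<T>(string s) where T : ISimpleParseable<T> => T.Parse(s);

    static void Main() => Console.WriteLine(ParseAny<Meters>("1.5").Value);
}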
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.TypeSystem.Tests/CoreTestAssembly/Platform.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable 649 #pragma warning disable 169 namespace System { // Dummy core types to allow us compiling this assembly as a core library so that the type // system tests don't have a dependency on a real core library. public class Object { internal IntPtr m_pEEType; public virtual bool Equals(object other) { return false; } public virtual int GetHashCode() { return 0; } public virtual string ToString() { return null; } ~Object() { } } public struct Void { } public struct Boolean { } public struct Char { } public struct SByte { } public struct Byte { } public struct Int16 { } public struct UInt16 { } public struct Int32 { } public struct UInt32 { } public struct Int64 { } public struct UInt64 { } public struct IntPtr { } public struct UIntPtr { } public struct Single { } public struct Double { } public abstract class ValueType { } public abstract class Enum : ValueType { } public struct Nullable<T> where T : struct { } public sealed class String { } public abstract class Array : System.Collections.IList { } public abstract class Delegate { } public abstract class MulticastDelegate : Delegate { } public struct RuntimeTypeHandle { } public struct RuntimeMethodHandle { } public struct RuntimeFieldHandle { } public class Attribute { } public class ThreadStaticAttribute : Attribute { } public class Array<T> : Array, System.Collections.Generic.IList<T> { } public class Exception { } public ref struct TypedReference { private readonly ByReference<byte> _value; private readonly RuntimeTypeHandle _typeHandle; } public ref struct ByReference<T> { } } namespace System.Collections { interface IEnumerable { } interface ICollection : IEnumerable { } interface IList : ICollection { } } namespace System.Collections.Generic { interface IEnumerable<out T> { } interface ICollection<T> : IEnumerable<T> { } interface IList<T> : ICollection<T> { } } namespace System.Runtime.InteropServices { public enum LayoutKind { Sequential = 0, // 0x00000008, Explicit = 2, // 0x00000010, Auto = 3, // 0x00000000, } public sealed class StructLayoutAttribute : Attribute { internal LayoutKind _val; public StructLayoutAttribute(LayoutKind layoutKind) { _val = layoutKind; } public LayoutKind Value { get { return _val; } } public int Pack; public int Size; } public sealed class FieldOffsetAttribute : Attribute { private int _val; public FieldOffsetAttribute(int offset) { _val = offset; } public int Value { get { return _val; } } } } namespace System.Runtime.CompilerServices { public sealed class IsByRefLikeAttribute : Attribute { } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable 649 #pragma warning disable 169 namespace System { // Dummy core types to allow us compiling this assembly as a core library so that the type // system tests don't have a dependency on a real core library. public class Object { internal IntPtr m_pEEType; public virtual bool Equals(object other) { return false; } public virtual int GetHashCode() { return 0; } public virtual string ToString() { return null; } ~Object() { } } public struct Void { } public struct Boolean { } public struct Char { } public struct SByte { } public struct Byte { } public struct Int16 { } public struct UInt16 { } public struct Int32 { } public struct UInt32 { } public struct Int64 { } public struct UInt64 { } public struct IntPtr { } public struct UIntPtr { } public struct Single { } public struct Double { } public abstract class ValueType { } public abstract class Enum : ValueType { } public struct Nullable<T> where T : struct { } public sealed class String { } public abstract class Array : System.Collections.IList { } public abstract class Delegate { } public abstract class MulticastDelegate : Delegate { } public struct RuntimeTypeHandle { } public struct RuntimeMethodHandle { } public struct RuntimeFieldHandle { } public class Attribute { } public class ThreadStaticAttribute : Attribute { } public class Array<T> : Array, System.Collections.Generic.IList<T> { } public class Exception { } public ref struct TypedReference { private readonly ByReference<byte> _value; private readonly RuntimeTypeHandle _typeHandle; } public ref struct ByReference<T> { } } namespace System.Collections { interface IEnumerable { } interface ICollection : IEnumerable { } interface IList : ICollection { } } namespace System.Collections.Generic { interface IEnumerable<out T> { } interface ICollection<T> : IEnumerable<T> { } interface IList<T> : ICollection<T> { } } namespace System.Runtime.InteropServices { public enum LayoutKind { Sequential = 0, // 0x00000008, Explicit = 2, // 0x00000010, Auto = 3, // 0x00000000, } public sealed class StructLayoutAttribute : Attribute { internal LayoutKind _val; public StructLayoutAttribute(LayoutKind layoutKind) { _val = layoutKind; } public LayoutKind Value { get { return _val; } } public int Pack; public int Size; } public sealed class FieldOffsetAttribute : Attribute { private int _val; public FieldOffsetAttribute(int offset) { _val = offset; } public int Value { get { return _val; } } } } namespace System.Runtime.CompilerServices { public sealed class IsByRefLikeAttribute : Attribute { } public static class RuntimeFeature { public const string VirtualStaticsInInterfaces = nameof(VirtualStaticsInInterfaces); } }
1
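For context on the Platform.cs record above: the only addition to the dummy core library is the `RuntimeFeature` class with the `VirtualStaticsInInterfaces` constant, mirroring the marker a real core library exposes so the type system can detect the feature. Below is a minimal sketch of the language feature itself, assuming a compiler and runtime with static abstract interface members enabled; the `IZero`/`Meters`/`MakeZero` names are illustrative, not from the PR.

using System;
using System.Runtime.CompilerServices;

// A static abstract interface member, the feature that the new
// RuntimeFeature.VirtualStaticsInInterfaces constant advertises.
interface IZero<T> where T : IZero<T>
{
    static abstract T Zero { get; }
}

struct Meters : IZero<Meters>
{
    public double Value;
    public static Meters Zero => new Meters { Value = 0 };
}

class FeatureSketch
{
    static T MakeZero<T>() where T : IZero<T>
    {
        // Dispatched through a constrained call on the type parameter,
        // which is the path the PR hooks into via ResolveConstraintMethodApprox.
        return T.Zero;
    }

    static void Main()
    {
        // The real core library exposes the same constant for feature probing.
        Console.WriteLine(RuntimeFeature.IsSupported(RuntimeFeature.VirtualStaticsInInterfaces));
        Console.WriteLine(MakeZero<Meters>().Value); // prints 0
    }
}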
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/aot/ILCompiler.TypeSystem.Tests/ILCompiler.TypeSystem.Tests.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AssemblyName>ILCompiler.TypeSystem.Tests</AssemblyName> <TargetFramework>$(NetCoreAppToolCurrent)</TargetFramework> <Configurations>Debug;Release;Checked</Configurations> <!-- This seems to be required for supporting assemblies to be copied into the output --> <CopyLocalLockFileAssemblies>true</CopyLocalLockFileAssemblies> <TestRunnerAdditionalArguments>-notrait category=failing</TestRunnerAdditionalArguments> <!-- xunit.runner.visualstudio is restored for .NET Framework instead of Core--> <NoWarn>$(NoWarn);NU1701</NoWarn> <!-- By default the subdirectories containing CoreTestAssembly and ILTestAssembly would be included in compilation of this project --> <EnableDefaultItems>false</EnableDefaultItems> <Platforms>AnyCPU;x64</Platforms> <PlatformTarget>AnyCPU</PlatformTarget> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <!-- Avoids having to include InteropStateManager.cs to get files in Common/TypeSystem/Interop/IL building --> <DefineConstants>READYTORUN;$(DefineConstants)</DefineConstants> </PropertyGroup> <ItemGroup> <PackageReference Include="xunit.core" Version="$(XUnitVersion)" ExcludeAssets="build" /> <PackageReference Include="Microsoft.DotNet.XUnitExtensions" Version="$(MicrosoftDotNetXUnitExtensionsVersion)" /> <PackageReference Include="System.Reflection.Metadata" Version="$(SystemReflectionMetadataVersion)" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\ILCompiler.TypeSystem\ILCompiler.TypeSystem.csproj" /> <!-- Make sure the test data gets built --> <ProjectReference Include="CoreTestAssembly\CoreTestAssembly.csproj"> <ReferenceOutputAssembly>false</ReferenceOutputAssembly> <OutputItemType>Content</OutputItemType> <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> </ProjectReference> <ProjectReference Include="ILTestAssembly\ILTestAssembly.ilproj"> <ReferenceOutputAssembly>false</ReferenceOutputAssembly> <OutputItemType>Content</OutputItemType> <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> </ProjectReference> </ItemGroup> <ItemGroup> <Compile Include="../../Common/TypeSystem/MetadataEmitter/TypeSystemMetadataEmitter.cs" /> <Compile Include="../../Common/TypeSystem/IL/ILReader.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshalUtils.cs" Link="TypeSystem/Interop/IL/MarshalUtils.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshalHelpers.cs" Link="TypeSystem/Interop/IL/MarshalHelpers.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshallerKind.cs" Link="TypeSystem/Interop/IL/MarshallerKind.cs" /> <Compile Include="../../Common/TypeSystem/Interop/InteropTypes.cs" Link="TypeSystem/Interop/InteropTypes.cs" /> <Compile Include="../../Common/TypeSystem/IL/HelperExtensions.cs" Link="TypeSystem/IL/HelperExtensions" /> <Compile Include="ArchitectureSpecificFieldLayoutTests.cs" /> <Compile Include="CanonicalizationTests.cs" /> <Compile Include="ConstraintsValidationTest.cs" /> <Compile Include="GCPointerMapTests.cs" /> <Compile Include="GenericTypeAndMethodTests.cs" /> <Compile Include="CastingTests.cs" /> <Compile Include="DefType.FieldLayoutTests.cs" /> <Compile Include="HashcodeTests.cs" /> <Compile Include="ILDisassemblerTests.cs" /> <Compile Include="InterfacesTests.cs" /> <Compile Include="RuntimeDeterminedTypesTests.cs" /> <Compile Include="SignatureTests.cs" /> <Compile Include="SyntheticVirtualOverrideTests.cs" /> <Compile Include="SyntheticVirtualOverrideTests.DiagnosticName.cs" /> <Compile Include="TestMetadataFieldLayoutAlgorithm.cs" /> <Compile 
Include="TypeNameParsingTests.cs" /> <Compile Include="UniversalGenericFieldLayoutTests.cs" /> <Compile Include="ValueTypeShapeCharacteristicsTests.cs" /> <Compile Include="VirtualFunctionOverrideTests.cs" /> <Compile Include="InstanceFieldLayoutTests.cs" /> <Compile Include="StaticFieldLayoutTests.cs" /> <Compile Include="TestTypeSystemContext.cs" /> <Compile Include="WellKnownTypeTests.cs" /> <Compile Include="ExceptionStringTests.cs" /> <Compile Include="MarshalUtilsTests.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AssemblyName>ILCompiler.TypeSystem.Tests</AssemblyName> <TargetFramework>$(NetCoreAppToolCurrent)</TargetFramework> <Configurations>Debug;Release;Checked</Configurations> <!-- This seems to be required for supporting assemblies to be copied into the output --> <CopyLocalLockFileAssemblies>true</CopyLocalLockFileAssemblies> <TestRunnerAdditionalArguments>-notrait category=failing</TestRunnerAdditionalArguments> <!-- xunit.runner.visualstudio is restored for .NET Framework instead of Core--> <NoWarn>$(NoWarn);NU1701</NoWarn> <!-- By default the subdirectories containing CoreTestAssembly and ILTestAssembly would be included in compilation of this project --> <EnableDefaultItems>false</EnableDefaultItems> <Platforms>AnyCPU;x64</Platforms> <PlatformTarget>AnyCPU</PlatformTarget> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <!-- Avoids having to include InteropStateManager.cs to get files in Common/TypeSystem/Interop/IL building --> <DefineConstants>READYTORUN;$(DefineConstants)</DefineConstants> </PropertyGroup> <ItemGroup> <PackageReference Include="xunit.core" Version="$(XUnitVersion)" ExcludeAssets="build" /> <PackageReference Include="Microsoft.DotNet.XUnitExtensions" Version="$(MicrosoftDotNetXUnitExtensionsVersion)" /> <PackageReference Include="System.Reflection.Metadata" Version="$(SystemReflectionMetadataVersion)" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\ILCompiler.TypeSystem\ILCompiler.TypeSystem.csproj" /> <!-- Make sure the test data gets built --> <ProjectReference Include="CoreTestAssembly\CoreTestAssembly.csproj"> <ReferenceOutputAssembly>false</ReferenceOutputAssembly> <OutputItemType>Content</OutputItemType> <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> </ProjectReference> <ProjectReference Include="ILTestAssembly\ILTestAssembly.ilproj"> <ReferenceOutputAssembly>false</ReferenceOutputAssembly> <OutputItemType>Content</OutputItemType> <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> </ProjectReference> </ItemGroup> <ItemGroup> <Compile Include="../../Common/TypeSystem/MetadataEmitter/TypeSystemMetadataEmitter.cs" /> <Compile Include="../../Common/TypeSystem/IL/ILReader.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshalUtils.cs" Link="TypeSystem/Interop/IL/MarshalUtils.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshalHelpers.cs" Link="TypeSystem/Interop/IL/MarshalHelpers.cs" /> <Compile Include="../../Common/TypeSystem/Interop/IL/MarshallerKind.cs" Link="TypeSystem/Interop/IL/MarshallerKind.cs" /> <Compile Include="../../Common/TypeSystem/Interop/InteropTypes.cs" Link="TypeSystem/Interop/InteropTypes.cs" /> <Compile Include="../../Common/TypeSystem/IL/HelperExtensions.cs" Link="TypeSystem/IL/HelperExtensions" /> <Compile Include="ArchitectureSpecificFieldLayoutTests.cs" /> <Compile Include="CanonicalizationTests.cs" /> <Compile Include="ConstraintsValidationTest.cs" /> <Compile Include="GCPointerMapTests.cs" /> <Compile Include="GenericTypeAndMethodTests.cs" /> <Compile Include="CastingTests.cs" /> <Compile Include="DefType.FieldLayoutTests.cs" /> <Compile Include="HashcodeTests.cs" /> <Compile Include="ILDisassemblerTests.cs" /> <Compile Include="InterfacesTests.cs" /> <Compile Include="RuntimeDeterminedTypesTests.cs" /> <Compile Include="SignatureTests.cs" /> <Compile Include="SyntheticVirtualOverrideTests.cs" /> <Compile Include="SyntheticVirtualOverrideTests.DiagnosticName.cs" /> <Compile Include="TestMetadataFieldLayoutAlgorithm.cs" /> <Compile 
Include="TypeNameParsingTests.cs" /> <Compile Include="UniversalGenericFieldLayoutTests.cs" /> <Compile Include="ValueTypeShapeCharacteristicsTests.cs" /> <Compile Include="VirtualFunctionOverrideTests.cs" /> <Compile Include="InstanceFieldLayoutTests.cs" /> <Compile Include="StaticFieldLayoutTests.cs" /> <Compile Include="TestTypeSystemContext.cs" /> <Compile Include="VirtualStaticInterfaceMethodTests.cs" /> <Compile Include="WellKnownTypeTests.cs" /> <Compile Include="ExceptionStringTests.cs" /> <Compile Include="MarshalUtilsTests.cs" /> </ItemGroup> </Project>
1
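The csproj change above simply adds the new VirtualStaticInterfaceMethodTests.cs to the compile list. As a rough illustration of what such resolution tests verify (mapping a static abstract interface method to the static method that implements it), here is a plain-reflection sketch; it stands in for the ILCompiler type system API, so the class names and the use of GetInterfaceMap are assumptions for illustration, and GetInterfaceMap only covers static virtuals on runtimes that support the feature.

using System;
using System.Reflection;

interface ISimple
{
    static abstract string GetCookie();
}

class SimpleClass : ISimple
{
    public static string GetCookie() => "SimpleClass";
}

class ResolutionSketch
{
    static void Main()
    {
        // The interface map pairs each static abstract declaration with the
        // static implementation the runtime resolved for this class.
        InterfaceMapping map = typeof(SimpleClass).GetInterfaceMap(typeof(ISimple));
        for (int i = 0; i < map.InterfaceMethods.Length; i++)
        {
            Console.WriteLine(
                $"{map.InterfaceMethods[i].Name} -> " +
                $"{map.TargetMethods[i].DeclaringType}.{map.TargetMethods[i].Name}");
        }
    }
}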
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support for creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/nativeaot/SmokeTests/UnitTests/Interfaces.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Text; using System.Collections.Generic; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; public class Interfaces { const int Pass = 100; const int Fail = -1; public static int Run() { if (TestInterfaceCache() == Fail) return Fail; if (TestAVInInterfaceCache() == Fail) return Fail; if (TestMultipleInterfaces() == Fail) return Fail; if (TestArrayInterfaces() == Fail) return Fail; if (TestVariantInterfaces() == Fail) return Fail; if (TestSpecialArrayInterfaces() == Fail) return Fail; if (TestIterfaceCallOptimization() == Fail) return Fail; TestDefaultInterfaceMethods.Run(); TestDefaultInterfaceVariance.Run(); TestVariantInterfaceOptimizations.Run(); TestSharedIntefaceMethods.Run(); TestCovariantReturns.Run(); TestDynamicInterfaceCastable.Run(); return Pass; } private static MyInterface[] MakeInterfaceArray() { MyInterface[] itfs = new MyInterface[50]; itfs[0] = new Foo0(); itfs[1] = new Foo1(); itfs[2] = new Foo2(); itfs[3] = new Foo3(); itfs[4] = new Foo4(); itfs[5] = new Foo5(); itfs[6] = new Foo6(); itfs[7] = new Foo7(); itfs[8] = new Foo8(); itfs[9] = new Foo9(); itfs[10] = new Foo10(); itfs[11] = new Foo11(); itfs[12] = new Foo12(); itfs[13] = new Foo13(); itfs[14] = new Foo14(); itfs[15] = new Foo15(); itfs[16] = new Foo16(); itfs[17] = new Foo17(); itfs[18] = new Foo18(); itfs[19] = new Foo19(); itfs[20] = new Foo20(); itfs[21] = new Foo21(); itfs[22] = new Foo22(); itfs[23] = new Foo23(); itfs[24] = new Foo24(); itfs[25] = new Foo25(); itfs[26] = new Foo26(); itfs[27] = new Foo27(); itfs[28] = new Foo28(); itfs[29] = new Foo29(); itfs[30] = new Foo30(); itfs[31] = new Foo31(); itfs[32] = new Foo32(); itfs[33] = new Foo33(); itfs[34] = new Foo34(); itfs[35] = new Foo35(); itfs[36] = new Foo36(); itfs[37] = new Foo37(); itfs[38] = new Foo38(); itfs[39] = new Foo39(); itfs[40] = new Foo40(); itfs[41] = new Foo41(); itfs[42] = new Foo42(); itfs[43] = new Foo43(); itfs[44] = new Foo44(); itfs[45] = new Foo45(); itfs[46] = new Foo46(); itfs[47] = new Foo47(); itfs[48] = new Foo48(); itfs[49] = new Foo49(); return itfs; } #region Interface Dispatch Cache Test private static int TestInterfaceCache() { MyInterface[] itfs = MakeInterfaceArray(); StringBuilder sb = new StringBuilder(); int counter = 0; for (int i = 0; i < 50; i++) { sb.Append(itfs[i].GetAString()); counter += itfs[i].GetAnInt(); } string expected = "Foo0Foo1Foo2Foo3Foo4Foo5Foo6Foo7Foo8Foo9Foo10Foo11Foo12Foo13Foo14Foo15Foo16Foo17Foo18Foo19Foo20Foo21Foo22Foo23Foo24Foo25Foo26Foo27Foo28Foo29Foo30Foo31Foo32Foo33Foo34Foo35Foo36Foo37Foo38Foo39Foo40Foo41Foo42Foo43Foo44Foo45Foo46Foo47Foo48Foo49"; if (!expected.Equals(sb.ToString())) { Console.WriteLine("Concatenating strings from interface calls failed."); Console.Write("Expected: "); Console.WriteLine(expected); Console.Write(" Actual: "); Console.WriteLine(sb.ToString()); return Fail; } if (counter != 1225) { Console.WriteLine("Summing ints from interface calls failed."); Console.WriteLine("Expected: 1225"); Console.Write("Actual: "); Console.WriteLine(counter); return Fail; } return 100; } private static int TestAVInInterfaceCache() { MyInterface[] itfs = MakeInterfaceArray(); MyInterface[] testArray = new MyInterface[itfs.Length * 2]; for (int i = 0; i < itfs.Length; i++) { testArray[i * 2 + 1] = itfs[i]; } int numExceptions = 0; // Make sure AV in dispatch helpers is translated to NullRef for 
(int i = 0; i < testArray.Length; i++) { try { testArray[i].GetAnInt(); } catch (NullReferenceException) { numExceptions++; } } // Make sure there's no trouble with unwinding out of the dispatch helper InterfaceWithManyParameters testInstance = null; for (int i = 0; i < 3; i++) { try { testInstance.ManyParameters(0, 0, 0, 0, 0, 0, 0, 0); } catch (NullReferenceException) { numExceptions++; } if (testInstance == null) testInstance = new ClassWithManyParameters(); else testInstance = null; } return numExceptions == itfs.Length + 2 ? Pass : Fail; } interface MyInterface { int GetAnInt(); string GetAString(); } interface InterfaceWithManyParameters { int ManyParameters(int a, int b, int c, int d, int e, int f, int g, int h); } class ClassWithManyParameters : InterfaceWithManyParameters { public int ManyParameters(int a, int b, int c, int d, int e, int f, int g, int h) => 42; } class Foo0 : MyInterface { public int GetAnInt() { return 0; } public string GetAString() { return "Foo0"; } } class Foo1 : MyInterface { public int GetAnInt() { return 1; } public string GetAString() { return "Foo1"; } } class Foo2 : MyInterface { public int GetAnInt() { return 2; } public string GetAString() { return "Foo2"; } } class Foo3 : MyInterface { public int GetAnInt() { return 3; } public string GetAString() { return "Foo3"; } } class Foo4 : MyInterface { public int GetAnInt() { return 4; } public string GetAString() { return "Foo4"; } } class Foo5 : MyInterface { public int GetAnInt() { return 5; } public string GetAString() { return "Foo5"; } } class Foo6 : MyInterface { public int GetAnInt() { return 6; } public string GetAString() { return "Foo6"; } } class Foo7 : MyInterface { public int GetAnInt() { return 7; } public string GetAString() { return "Foo7"; } } class Foo8 : MyInterface { public int GetAnInt() { return 8; } public string GetAString() { return "Foo8"; } } class Foo9 : MyInterface { public int GetAnInt() { return 9; } public string GetAString() { return "Foo9"; } } class Foo10 : MyInterface { public int GetAnInt() { return 10; } public string GetAString() { return "Foo10"; } } class Foo11 : MyInterface { public int GetAnInt() { return 11; } public string GetAString() { return "Foo11"; } } class Foo12 : MyInterface { public int GetAnInt() { return 12; } public string GetAString() { return "Foo12"; } } class Foo13 : MyInterface { public int GetAnInt() { return 13; } public string GetAString() { return "Foo13"; } } class Foo14 : MyInterface { public int GetAnInt() { return 14; } public string GetAString() { return "Foo14"; } } class Foo15 : MyInterface { public int GetAnInt() { return 15; } public string GetAString() { return "Foo15"; } } class Foo16 : MyInterface { public int GetAnInt() { return 16; } public string GetAString() { return "Foo16"; } } class Foo17 : MyInterface { public int GetAnInt() { return 17; } public string GetAString() { return "Foo17"; } } class Foo18 : MyInterface { public int GetAnInt() { return 18; } public string GetAString() { return "Foo18"; } } class Foo19 : MyInterface { public int GetAnInt() { return 19; } public string GetAString() { return "Foo19"; } } class Foo20 : MyInterface { public int GetAnInt() { return 20; } public string GetAString() { return "Foo20"; } } class Foo21 : MyInterface { public int GetAnInt() { return 21; } public string GetAString() { return "Foo21"; } } class Foo22 : MyInterface { public int GetAnInt() { return 22; } public string GetAString() { return "Foo22"; } } class Foo23 : MyInterface { public int GetAnInt() { return 23; } public 
string GetAString() { return "Foo23"; } } class Foo24 : MyInterface { public int GetAnInt() { return 24; } public string GetAString() { return "Foo24"; } } class Foo25 : MyInterface { public int GetAnInt() { return 25; } public string GetAString() { return "Foo25"; } } class Foo26 : MyInterface { public int GetAnInt() { return 26; } public string GetAString() { return "Foo26"; } } class Foo27 : MyInterface { public int GetAnInt() { return 27; } public string GetAString() { return "Foo27"; } } class Foo28 : MyInterface { public int GetAnInt() { return 28; } public string GetAString() { return "Foo28"; } } class Foo29 : MyInterface { public int GetAnInt() { return 29; } public string GetAString() { return "Foo29"; } } class Foo30 : MyInterface { public int GetAnInt() { return 30; } public string GetAString() { return "Foo30"; } } class Foo31 : MyInterface { public int GetAnInt() { return 31; } public string GetAString() { return "Foo31"; } } class Foo32 : MyInterface { public int GetAnInt() { return 32; } public string GetAString() { return "Foo32"; } } class Foo33 : MyInterface { public int GetAnInt() { return 33; } public string GetAString() { return "Foo33"; } } class Foo34 : MyInterface { public int GetAnInt() { return 34; } public string GetAString() { return "Foo34"; } } class Foo35 : MyInterface { public int GetAnInt() { return 35; } public string GetAString() { return "Foo35"; } } class Foo36 : MyInterface { public int GetAnInt() { return 36; } public string GetAString() { return "Foo36"; } } class Foo37 : MyInterface { public int GetAnInt() { return 37; } public string GetAString() { return "Foo37"; } } class Foo38 : MyInterface { public int GetAnInt() { return 38; } public string GetAString() { return "Foo38"; } } class Foo39 : MyInterface { public int GetAnInt() { return 39; } public string GetAString() { return "Foo39"; } } class Foo40 : MyInterface { public int GetAnInt() { return 40; } public string GetAString() { return "Foo40"; } } class Foo41 : MyInterface { public int GetAnInt() { return 41; } public string GetAString() { return "Foo41"; } } class Foo42 : MyInterface { public int GetAnInt() { return 42; } public string GetAString() { return "Foo42"; } } class Foo43 : MyInterface { public int GetAnInt() { return 43; } public string GetAString() { return "Foo43"; } } class Foo44 : MyInterface { public int GetAnInt() { return 44; } public string GetAString() { return "Foo44"; } } class Foo45 : MyInterface { public int GetAnInt() { return 45; } public string GetAString() { return "Foo45"; } } class Foo46 : MyInterface { public int GetAnInt() { return 46; } public string GetAString() { return "Foo46"; } } class Foo47 : MyInterface { public int GetAnInt() { return 47; } public string GetAString() { return "Foo47"; } } class Foo48 : MyInterface { public int GetAnInt() { return 48; } public string GetAString() { return "Foo48"; } } class Foo49 : MyInterface { public int GetAnInt() { return 49; } public string GetAString() { return "Foo49"; } } #endregion #region Implicit Interface Test private static int TestMultipleInterfaces() { TestClass<int> testInt = new TestClass<int>(5); MyInterface myInterface = testInt as MyInterface; if (!myInterface.GetAString().Equals("TestClass")) { Console.Write("On type TestClass, MyInterface.GetAString() returned "); Console.Write(myInterface.GetAString()); Console.WriteLine(" Expected: TestClass"); return Fail; } if (myInterface.GetAnInt() != 1) { Console.Write("On type TestClass, MyInterface.GetAnInt() returned "); 
Console.Write(myInterface.GetAnInt()); Console.WriteLine(" Expected: 1"); return Fail; } Interface<int> itf = testInt as Interface<int>; if (itf.GetT() != 5) { Console.Write("On type TestClass, Interface<int>::GetT() returned "); Console.Write(itf.GetT()); Console.WriteLine(" Expected: 5"); return Fail; } return Pass; } interface Interface<T> { T GetT(); } class TestClass<T> : MyInterface, Interface<T> { T _t; public TestClass(T t) { _t = t; } public T GetT() { return _t; } public int GetAnInt() { return 1; } public string GetAString() { return "TestClass"; } } #endregion #region Array Interfaces Test private static int TestArrayInterfaces() { { object stringArray = new string[] { "A", "B", "C", "D" }; Console.WriteLine("Testing IEnumerable<T> on array..."); string result = String.Empty; foreach (var s in (System.Collections.Generic.IEnumerable<string>)stringArray) result += s; if (result != "ABCD") { Console.WriteLine("Failed."); return Fail; } } { object stringArray = new string[] { "A", "B", "C", "D" }; Console.WriteLine("Testing IEnumerable on array..."); string result = String.Empty; foreach (var s in (System.Collections.IEnumerable)stringArray) result += s; if (result != "ABCD") { Console.WriteLine("Failed."); return Fail; } } { object intArray = new int[5, 5]; Console.WriteLine("Testing IList on MDArray..."); if (((System.Collections.IList)intArray).Count != 25) { Console.WriteLine("Failed."); return Fail; } } return Pass; } #endregion #region Variant interface tests interface IContravariantInterface<in T> { string DoContravariant(T value); } interface ICovariantInterface<out T> { T DoCovariant(object value); } class TypeWithVariantInterfaces<T> : IContravariantInterface<T>, ICovariantInterface<T> { public string DoContravariant(T value) { return value.ToString(); } public T DoCovariant(object value) { return value is T ? (T)value : default(T); } } static IContravariantInterface<string> s_contravariantObject = new TypeWithVariantInterfaces<object>(); static ICovariantInterface<object> s_covariantObject = new TypeWithVariantInterfaces<string>(); static IEnumerable<int> s_arrayCovariantObject = (IEnumerable<int>)(object)new uint[] { 5, 10, 15 }; private static int TestVariantInterfaces() { if (s_contravariantObject.DoContravariant("Hello") != "Hello") return Fail; if (s_covariantObject.DoCovariant("World") as string != "World") return Fail; int sum = 0; foreach (var e in s_arrayCovariantObject) sum += e; if (sum != 30) return Fail; return Pass; } class SpecialArrayBase { } class SpecialArrayDerived : SpecialArrayBase { } // NOTE: ICollection is not a variant interface, but arrays can cast with it as if it was static ICollection<SpecialArrayBase> s_specialDerived = new SpecialArrayDerived[42]; static ICollection<uint> s_specialInt = (ICollection<uint>)(object)new int[85]; private static int TestSpecialArrayInterfaces() { if (s_specialDerived.Count != 42) return Fail; if (s_specialInt.Count != 85) return Fail; return Pass; } #endregion #region Interface call optimization tests public interface ISomeInterface { int SomeValue { get; } } public abstract class SomeAbstractBaseClass : ISomeInterface { public abstract int SomeValue { get; } } public class SomeClass : SomeAbstractBaseClass { public override int SomeValue { [MethodImpl(MethodImplOptions.NoInlining)] get { return 14; } } } private static int TestIterfaceCallOptimization() { ISomeInterface test = new SomeClass(); int v = test.SomeValue; return (v == 14) ? 
Pass : Fail; } #endregion class TestDefaultInterfaceMethods { interface IFoo { int GetNumber() => 42; } interface IBar : IFoo { int IFoo.GetNumber() => 43; } class Foo : IFoo { } class Bar : IBar { } class Baz : IFoo { public int GetNumber() => 100; } interface IFoo<T> { Type GetInterfaceType() => typeof(IFoo<T>); } class Foo<T> : IFoo<T> { } public static void Run() { Console.WriteLine("Testing default interface methods..."); if (((IFoo)new Foo()).GetNumber() != 42) throw new Exception(); if (((IFoo)new Bar()).GetNumber() != 43) throw new Exception(); if (((IFoo)new Baz()).GetNumber() != 100) throw new Exception(); if (((IFoo<object>)new Foo<object>()).GetInterfaceType() != typeof(IFoo<object>)) throw new Exception(); if (((IFoo<int>)new Foo<int>()).GetInterfaceType() != typeof(IFoo<int>)) throw new Exception(); } } class TestDefaultInterfaceVariance { class Foo : IVariant<string>, IVariant<object> { string IVariant<object>.Frob() => "Hello class"; } interface IVariant<in T> { string Frob() => "Hello default"; } public static void Run() { Console.WriteLine("Testing default interface variant ordering..."); if (((IVariant<object>)new Foo()).Frob() != "Hello class") throw new Exception(); if (((IVariant<string>)new Foo()).Frob() != "Hello class") throw new Exception(); if (((IVariant<ValueType>)new Foo()).Frob() != "Hello class") throw new Exception(); } } class TestSharedIntefaceMethods { interface IInnerValueGrabber { string GetInnerValue(); } interface IFace<T> : IInnerValueGrabber { string GrabValue(T x) => $"'{GetInnerValue()}' over '{typeof(T)}' with '{x}'"; } class Base<T> : IFace<T>, IInnerValueGrabber { public string InnerValue; public string GetInnerValue() => InnerValue; } class Derived<T, U> : Base<T>, IFace<U> { } struct Yadda : IFace<object>, IInnerValueGrabber { public string InnerValue; public string GetInnerValue() => InnerValue; } class Atom1 { public override string ToString() => "The Atom1"; } class Atom2 { public override string ToString() => "The Atom2"; } public static void Run() { Console.WriteLine("Testing default interface methods and shared code..."); var x = new Derived<Atom1, Atom2>() { InnerValue = "My inner value" }; string r1 = ((IFace<Atom1>)x).GrabValue(new Atom1()); if (r1 != "'My inner value' over 'Interfaces+TestSharedIntefaceMethods+Atom1' with 'The Atom1'") throw new Exception(); string r2 = ((IFace<Atom2>)x).GrabValue(new Atom2()); if (r2 != "'My inner value' over 'Interfaces+TestSharedIntefaceMethods+Atom2' with 'The Atom2'") throw new Exception(); IFace<object> o = new Yadda() { InnerValue = "SomeString" }; string r3 = o.GrabValue("Hello there"); if (r3 != "'SomeString' over 'System.Object' with 'Hello there'") throw new Exception(); } } class TestCovariantReturns { interface IFoo { } class Foo : IFoo { public readonly string State; public Foo(string state) => State = state; } class Base { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class Derived : Base { public override Foo GetFoo() => new Foo("Derived"); } class SuperDerived : Derived { public override Foo GetFoo() => new Foo("SuperDerived"); } class BaseWithUnusedVirtual { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class DerivedWithOverridenUnusedVirtual : BaseWithUnusedVirtual { public override Foo GetFoo() => new Foo("DerivedWithOverridenUnusedVirtual"); } class SuperDerivedWithOverridenUnusedVirtual : DerivedWithOverridenUnusedVirtual { public override Foo GetFoo() => new Foo("SuperDerivedWithOverridenUnusedVirtual"); } interface 
IInterfaceWithCovariantReturn { IFoo GetFoo(); } class ClassImplementingInterface : IInterfaceWithCovariantReturn { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class DerivedClassImplementingInterface : ClassImplementingInterface { public override Foo GetFoo() => new Foo("DerivedClassImplementingInterface"); } public static void Run() { Console.WriteLine("Testing covariant returns..."); { Base b = new Derived(); if (((Foo)b.GetFoo()).State != "Derived") throw new Exception(); } { Base b = new SuperDerived(); if (((Foo)b.GetFoo()).State != "SuperDerived") throw new Exception(); } { Derived d = new SuperDerived(); if (d.GetFoo().State != "SuperDerived") throw new Exception(); } { DerivedWithOverridenUnusedVirtual b = new DerivedWithOverridenUnusedVirtual(); if (b.GetFoo().State != "DerivedWithOverridenUnusedVirtual") throw new Exception(); } { DerivedWithOverridenUnusedVirtual b = new SuperDerivedWithOverridenUnusedVirtual(); if (b.GetFoo().State != "SuperDerivedWithOverridenUnusedVirtual") throw new Exception(); } { IInterfaceWithCovariantReturn i = new DerivedClassImplementingInterface(); if (((Foo)i.GetFoo()).State != "DerivedClassImplementingInterface") throw new Exception(); } } } class TestVariantInterfaceOptimizations { static IEnumerable<Other> s_others = (IEnumerable<Other>)(object)new This[3] { (This)33, (This)66, (This)1 }; enum This : sbyte { } enum Other : sbyte { } sealed class MySealedClass { } interface IContravariantInterface<in T> { string DoContravariant(T value); } interface ICovariantInterface<out T> { T DoCovariant(object value); } class CoAndContravariantOverSealed : IContravariantInterface<object>, ICovariantInterface<MySealedClass> { public string DoContravariant(object value) => "Hello"; public MySealedClass DoCovariant(object value) => null; } public static void Run() { Console.WriteLine("Testing variant optimizations..."); int sum = 0; foreach (var other in s_others) { sum += (int)other; } if (sum != 100) throw new Exception(); ICovariantInterface<object> i1 = new CoAndContravariantOverSealed(); i1.DoCovariant(null); IContravariantInterface<MySealedClass> i2 = new CoAndContravariantOverSealed(); i2.DoContravariant(null); } } class TestDynamicInterfaceCastable { class CastableClass<TInterface, TImpl> : IDynamicInterfaceCastable { RuntimeTypeHandle IDynamicInterfaceCastable.GetInterfaceImplementation(RuntimeTypeHandle interfaceType) => interfaceType.Equals(typeof(TInterface).TypeHandle) ? 
typeof(TImpl).TypeHandle : default; bool IDynamicInterfaceCastable.IsInterfaceImplemented(RuntimeTypeHandle interfaceType, bool throwIfNotImplemented) => interfaceType.Equals(typeof(TInterface).TypeHandle); } interface IInterface { string GetCookie(); } [DynamicInterfaceCastableImplementation] interface IInterfaceCastableImpl : IInterface { string IInterface.GetCookie() => "IInterfaceCastableImpl"; } [DynamicInterfaceCastableImplementation] interface IInterfaceCastableImpl<T> : IInterface { string IInterface.GetCookie() => typeof(T).Name; } interface IInterfaceImpl : IInterface { string IInterface.GetCookie() => "IInterfaceImpl"; } [DynamicInterfaceCastableImplementation] interface IInterfaceIndirectCastableImpl : IInterfaceImpl { } public static void Run() { Console.WriteLine("Testing IDynamicInterfaceCastable..."); { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceCastableImpl>(); if (o.GetCookie() != "IInterfaceCastableImpl") throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceImpl>(); bool success = false; try { o.GetCookie(); } catch (InvalidOperationException) { success = true; } if (!success) throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceIndirectCastableImpl>(); if (o.GetCookie() != "IInterfaceImpl") throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceCastableImpl<int>>(); if (o.GetCookie() != "Int32") throw new Exception(); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Text; using System.Collections.Generic; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; public class Interfaces { const int Pass = 100; const int Fail = -1; public static int Run() { if (TestInterfaceCache() == Fail) return Fail; if (TestAVInInterfaceCache() == Fail) return Fail; if (TestMultipleInterfaces() == Fail) return Fail; if (TestArrayInterfaces() == Fail) return Fail; if (TestVariantInterfaces() == Fail) return Fail; if (TestSpecialArrayInterfaces() == Fail) return Fail; if (TestIterfaceCallOptimization() == Fail) return Fail; TestDefaultInterfaceMethods.Run(); TestDefaultInterfaceVariance.Run(); TestVariantInterfaceOptimizations.Run(); TestSharedIntefaceMethods.Run(); TestCovariantReturns.Run(); TestDynamicInterfaceCastable.Run(); TestStaticInterfaceMethodsAnalysis.Run(); TestStaticInterfaceMethods.Run(); return Pass; } private static MyInterface[] MakeInterfaceArray() { MyInterface[] itfs = new MyInterface[50]; itfs[0] = new Foo0(); itfs[1] = new Foo1(); itfs[2] = new Foo2(); itfs[3] = new Foo3(); itfs[4] = new Foo4(); itfs[5] = new Foo5(); itfs[6] = new Foo6(); itfs[7] = new Foo7(); itfs[8] = new Foo8(); itfs[9] = new Foo9(); itfs[10] = new Foo10(); itfs[11] = new Foo11(); itfs[12] = new Foo12(); itfs[13] = new Foo13(); itfs[14] = new Foo14(); itfs[15] = new Foo15(); itfs[16] = new Foo16(); itfs[17] = new Foo17(); itfs[18] = new Foo18(); itfs[19] = new Foo19(); itfs[20] = new Foo20(); itfs[21] = new Foo21(); itfs[22] = new Foo22(); itfs[23] = new Foo23(); itfs[24] = new Foo24(); itfs[25] = new Foo25(); itfs[26] = new Foo26(); itfs[27] = new Foo27(); itfs[28] = new Foo28(); itfs[29] = new Foo29(); itfs[30] = new Foo30(); itfs[31] = new Foo31(); itfs[32] = new Foo32(); itfs[33] = new Foo33(); itfs[34] = new Foo34(); itfs[35] = new Foo35(); itfs[36] = new Foo36(); itfs[37] = new Foo37(); itfs[38] = new Foo38(); itfs[39] = new Foo39(); itfs[40] = new Foo40(); itfs[41] = new Foo41(); itfs[42] = new Foo42(); itfs[43] = new Foo43(); itfs[44] = new Foo44(); itfs[45] = new Foo45(); itfs[46] = new Foo46(); itfs[47] = new Foo47(); itfs[48] = new Foo48(); itfs[49] = new Foo49(); return itfs; } #region Interface Dispatch Cache Test private static int TestInterfaceCache() { MyInterface[] itfs = MakeInterfaceArray(); StringBuilder sb = new StringBuilder(); int counter = 0; for (int i = 0; i < 50; i++) { sb.Append(itfs[i].GetAString()); counter += itfs[i].GetAnInt(); } string expected = "Foo0Foo1Foo2Foo3Foo4Foo5Foo6Foo7Foo8Foo9Foo10Foo11Foo12Foo13Foo14Foo15Foo16Foo17Foo18Foo19Foo20Foo21Foo22Foo23Foo24Foo25Foo26Foo27Foo28Foo29Foo30Foo31Foo32Foo33Foo34Foo35Foo36Foo37Foo38Foo39Foo40Foo41Foo42Foo43Foo44Foo45Foo46Foo47Foo48Foo49"; if (!expected.Equals(sb.ToString())) { Console.WriteLine("Concatenating strings from interface calls failed."); Console.Write("Expected: "); Console.WriteLine(expected); Console.Write(" Actual: "); Console.WriteLine(sb.ToString()); return Fail; } if (counter != 1225) { Console.WriteLine("Summing ints from interface calls failed."); Console.WriteLine("Expected: 1225"); Console.Write("Actual: "); Console.WriteLine(counter); return Fail; } return 100; } private static int TestAVInInterfaceCache() { MyInterface[] itfs = MakeInterfaceArray(); MyInterface[] testArray = new MyInterface[itfs.Length * 2]; for (int i = 0; i < itfs.Length; i++) { testArray[i * 2 + 1] = itfs[i]; } int 
numExceptions = 0; // Make sure AV in dispatch helpers is translated to NullRef for (int i = 0; i < testArray.Length; i++) { try { testArray[i].GetAnInt(); } catch (NullReferenceException) { numExceptions++; } } // Make sure there's no trouble with unwinding out of the dispatch helper InterfaceWithManyParameters testInstance = null; for (int i = 0; i < 3; i++) { try { testInstance.ManyParameters(0, 0, 0, 0, 0, 0, 0, 0); } catch (NullReferenceException) { numExceptions++; } if (testInstance == null) testInstance = new ClassWithManyParameters(); else testInstance = null; } return numExceptions == itfs.Length + 2 ? Pass : Fail; } interface MyInterface { int GetAnInt(); string GetAString(); } interface InterfaceWithManyParameters { int ManyParameters(int a, int b, int c, int d, int e, int f, int g, int h); } class ClassWithManyParameters : InterfaceWithManyParameters { public int ManyParameters(int a, int b, int c, int d, int e, int f, int g, int h) => 42; } class Foo0 : MyInterface { public int GetAnInt() { return 0; } public string GetAString() { return "Foo0"; } } class Foo1 : MyInterface { public int GetAnInt() { return 1; } public string GetAString() { return "Foo1"; } } class Foo2 : MyInterface { public int GetAnInt() { return 2; } public string GetAString() { return "Foo2"; } } class Foo3 : MyInterface { public int GetAnInt() { return 3; } public string GetAString() { return "Foo3"; } } class Foo4 : MyInterface { public int GetAnInt() { return 4; } public string GetAString() { return "Foo4"; } } class Foo5 : MyInterface { public int GetAnInt() { return 5; } public string GetAString() { return "Foo5"; } } class Foo6 : MyInterface { public int GetAnInt() { return 6; } public string GetAString() { return "Foo6"; } } class Foo7 : MyInterface { public int GetAnInt() { return 7; } public string GetAString() { return "Foo7"; } } class Foo8 : MyInterface { public int GetAnInt() { return 8; } public string GetAString() { return "Foo8"; } } class Foo9 : MyInterface { public int GetAnInt() { return 9; } public string GetAString() { return "Foo9"; } } class Foo10 : MyInterface { public int GetAnInt() { return 10; } public string GetAString() { return "Foo10"; } } class Foo11 : MyInterface { public int GetAnInt() { return 11; } public string GetAString() { return "Foo11"; } } class Foo12 : MyInterface { public int GetAnInt() { return 12; } public string GetAString() { return "Foo12"; } } class Foo13 : MyInterface { public int GetAnInt() { return 13; } public string GetAString() { return "Foo13"; } } class Foo14 : MyInterface { public int GetAnInt() { return 14; } public string GetAString() { return "Foo14"; } } class Foo15 : MyInterface { public int GetAnInt() { return 15; } public string GetAString() { return "Foo15"; } } class Foo16 : MyInterface { public int GetAnInt() { return 16; } public string GetAString() { return "Foo16"; } } class Foo17 : MyInterface { public int GetAnInt() { return 17; } public string GetAString() { return "Foo17"; } } class Foo18 : MyInterface { public int GetAnInt() { return 18; } public string GetAString() { return "Foo18"; } } class Foo19 : MyInterface { public int GetAnInt() { return 19; } public string GetAString() { return "Foo19"; } } class Foo20 : MyInterface { public int GetAnInt() { return 20; } public string GetAString() { return "Foo20"; } } class Foo21 : MyInterface { public int GetAnInt() { return 21; } public string GetAString() { return "Foo21"; } } class Foo22 : MyInterface { public int GetAnInt() { return 22; } public string GetAString() { return 
"Foo22"; } } class Foo23 : MyInterface { public int GetAnInt() { return 23; } public string GetAString() { return "Foo23"; } } class Foo24 : MyInterface { public int GetAnInt() { return 24; } public string GetAString() { return "Foo24"; } } class Foo25 : MyInterface { public int GetAnInt() { return 25; } public string GetAString() { return "Foo25"; } } class Foo26 : MyInterface { public int GetAnInt() { return 26; } public string GetAString() { return "Foo26"; } } class Foo27 : MyInterface { public int GetAnInt() { return 27; } public string GetAString() { return "Foo27"; } } class Foo28 : MyInterface { public int GetAnInt() { return 28; } public string GetAString() { return "Foo28"; } } class Foo29 : MyInterface { public int GetAnInt() { return 29; } public string GetAString() { return "Foo29"; } } class Foo30 : MyInterface { public int GetAnInt() { return 30; } public string GetAString() { return "Foo30"; } } class Foo31 : MyInterface { public int GetAnInt() { return 31; } public string GetAString() { return "Foo31"; } } class Foo32 : MyInterface { public int GetAnInt() { return 32; } public string GetAString() { return "Foo32"; } } class Foo33 : MyInterface { public int GetAnInt() { return 33; } public string GetAString() { return "Foo33"; } } class Foo34 : MyInterface { public int GetAnInt() { return 34; } public string GetAString() { return "Foo34"; } } class Foo35 : MyInterface { public int GetAnInt() { return 35; } public string GetAString() { return "Foo35"; } } class Foo36 : MyInterface { public int GetAnInt() { return 36; } public string GetAString() { return "Foo36"; } } class Foo37 : MyInterface { public int GetAnInt() { return 37; } public string GetAString() { return "Foo37"; } } class Foo38 : MyInterface { public int GetAnInt() { return 38; } public string GetAString() { return "Foo38"; } } class Foo39 : MyInterface { public int GetAnInt() { return 39; } public string GetAString() { return "Foo39"; } } class Foo40 : MyInterface { public int GetAnInt() { return 40; } public string GetAString() { return "Foo40"; } } class Foo41 : MyInterface { public int GetAnInt() { return 41; } public string GetAString() { return "Foo41"; } } class Foo42 : MyInterface { public int GetAnInt() { return 42; } public string GetAString() { return "Foo42"; } } class Foo43 : MyInterface { public int GetAnInt() { return 43; } public string GetAString() { return "Foo43"; } } class Foo44 : MyInterface { public int GetAnInt() { return 44; } public string GetAString() { return "Foo44"; } } class Foo45 : MyInterface { public int GetAnInt() { return 45; } public string GetAString() { return "Foo45"; } } class Foo46 : MyInterface { public int GetAnInt() { return 46; } public string GetAString() { return "Foo46"; } } class Foo47 : MyInterface { public int GetAnInt() { return 47; } public string GetAString() { return "Foo47"; } } class Foo48 : MyInterface { public int GetAnInt() { return 48; } public string GetAString() { return "Foo48"; } } class Foo49 : MyInterface { public int GetAnInt() { return 49; } public string GetAString() { return "Foo49"; } } #endregion #region Implicit Interface Test private static int TestMultipleInterfaces() { TestClass<int> testInt = new TestClass<int>(5); MyInterface myInterface = testInt as MyInterface; if (!myInterface.GetAString().Equals("TestClass")) { Console.Write("On type TestClass, MyInterface.GetAString() returned "); Console.Write(myInterface.GetAString()); Console.WriteLine(" Expected: TestClass"); return Fail; } if (myInterface.GetAnInt() != 1) { Console.Write("On 
type TestClass, MyInterface.GetAnInt() returned "); Console.Write(myInterface.GetAnInt()); Console.WriteLine(" Expected: 1"); return Fail; } Interface<int> itf = testInt as Interface<int>; if (itf.GetT() != 5) { Console.Write("On type TestClass, Interface<int>::GetT() returned "); Console.Write(itf.GetT()); Console.WriteLine(" Expected: 5"); return Fail; } return Pass; } interface Interface<T> { T GetT(); } class TestClass<T> : MyInterface, Interface<T> { T _t; public TestClass(T t) { _t = t; } public T GetT() { return _t; } public int GetAnInt() { return 1; } public string GetAString() { return "TestClass"; } } #endregion #region Array Interfaces Test private static int TestArrayInterfaces() { { object stringArray = new string[] { "A", "B", "C", "D" }; Console.WriteLine("Testing IEnumerable<T> on array..."); string result = String.Empty; foreach (var s in (System.Collections.Generic.IEnumerable<string>)stringArray) result += s; if (result != "ABCD") { Console.WriteLine("Failed."); return Fail; } } { object stringArray = new string[] { "A", "B", "C", "D" }; Console.WriteLine("Testing IEnumerable on array..."); string result = String.Empty; foreach (var s in (System.Collections.IEnumerable)stringArray) result += s; if (result != "ABCD") { Console.WriteLine("Failed."); return Fail; } } { object intArray = new int[5, 5]; Console.WriteLine("Testing IList on MDArray..."); if (((System.Collections.IList)intArray).Count != 25) { Console.WriteLine("Failed."); return Fail; } } return Pass; } #endregion #region Variant interface tests interface IContravariantInterface<in T> { string DoContravariant(T value); } interface ICovariantInterface<out T> { T DoCovariant(object value); } class TypeWithVariantInterfaces<T> : IContravariantInterface<T>, ICovariantInterface<T> { public string DoContravariant(T value) { return value.ToString(); } public T DoCovariant(object value) { return value is T ? (T)value : default(T); } } static IContravariantInterface<string> s_contravariantObject = new TypeWithVariantInterfaces<object>(); static ICovariantInterface<object> s_covariantObject = new TypeWithVariantInterfaces<string>(); static IEnumerable<int> s_arrayCovariantObject = (IEnumerable<int>)(object)new uint[] { 5, 10, 15 }; private static int TestVariantInterfaces() { if (s_contravariantObject.DoContravariant("Hello") != "Hello") return Fail; if (s_covariantObject.DoCovariant("World") as string != "World") return Fail; int sum = 0; foreach (var e in s_arrayCovariantObject) sum += e; if (sum != 30) return Fail; return Pass; } class SpecialArrayBase { } class SpecialArrayDerived : SpecialArrayBase { } // NOTE: ICollection is not a variant interface, but arrays can cast with it as if it was static ICollection<SpecialArrayBase> s_specialDerived = new SpecialArrayDerived[42]; static ICollection<uint> s_specialInt = (ICollection<uint>)(object)new int[85]; private static int TestSpecialArrayInterfaces() { if (s_specialDerived.Count != 42) return Fail; if (s_specialInt.Count != 85) return Fail; return Pass; } #endregion #region Interface call optimization tests public interface ISomeInterface { int SomeValue { get; } } public abstract class SomeAbstractBaseClass : ISomeInterface { public abstract int SomeValue { get; } } public class SomeClass : SomeAbstractBaseClass { public override int SomeValue { [MethodImpl(MethodImplOptions.NoInlining)] get { return 14; } } } private static int TestIterfaceCallOptimization() { ISomeInterface test = new SomeClass(); int v = test.SomeValue; return (v == 14) ? 
Pass : Fail; } #endregion class TestDefaultInterfaceMethods { interface IFoo { int GetNumber() => 42; } interface IBar : IFoo { int IFoo.GetNumber() => 43; } class Foo : IFoo { } class Bar : IBar { } class Baz : IFoo { public int GetNumber() => 100; } interface IFoo<T> { Type GetInterfaceType() => typeof(IFoo<T>); } class Foo<T> : IFoo<T> { } public static void Run() { Console.WriteLine("Testing default interface methods..."); if (((IFoo)new Foo()).GetNumber() != 42) throw new Exception(); if (((IFoo)new Bar()).GetNumber() != 43) throw new Exception(); if (((IFoo)new Baz()).GetNumber() != 100) throw new Exception(); if (((IFoo<object>)new Foo<object>()).GetInterfaceType() != typeof(IFoo<object>)) throw new Exception(); if (((IFoo<int>)new Foo<int>()).GetInterfaceType() != typeof(IFoo<int>)) throw new Exception(); } } class TestDefaultInterfaceVariance { class Foo : IVariant<string>, IVariant<object> { string IVariant<object>.Frob() => "Hello class"; } interface IVariant<in T> { string Frob() => "Hello default"; } public static void Run() { Console.WriteLine("Testing default interface variant ordering..."); if (((IVariant<object>)new Foo()).Frob() != "Hello class") throw new Exception(); if (((IVariant<string>)new Foo()).Frob() != "Hello class") throw new Exception(); if (((IVariant<ValueType>)new Foo()).Frob() != "Hello class") throw new Exception(); } } class TestSharedIntefaceMethods { interface IInnerValueGrabber { string GetInnerValue(); } interface IFace<T> : IInnerValueGrabber { string GrabValue(T x) => $"'{GetInnerValue()}' over '{typeof(T)}' with '{x}'"; } class Base<T> : IFace<T>, IInnerValueGrabber { public string InnerValue; public string GetInnerValue() => InnerValue; } class Derived<T, U> : Base<T>, IFace<U> { } struct Yadda : IFace<object>, IInnerValueGrabber { public string InnerValue; public string GetInnerValue() => InnerValue; } class Atom1 { public override string ToString() => "The Atom1"; } class Atom2 { public override string ToString() => "The Atom2"; } public static void Run() { Console.WriteLine("Testing default interface methods and shared code..."); var x = new Derived<Atom1, Atom2>() { InnerValue = "My inner value" }; string r1 = ((IFace<Atom1>)x).GrabValue(new Atom1()); if (r1 != "'My inner value' over 'Interfaces+TestSharedIntefaceMethods+Atom1' with 'The Atom1'") throw new Exception(); string r2 = ((IFace<Atom2>)x).GrabValue(new Atom2()); if (r2 != "'My inner value' over 'Interfaces+TestSharedIntefaceMethods+Atom2' with 'The Atom2'") throw new Exception(); IFace<object> o = new Yadda() { InnerValue = "SomeString" }; string r3 = o.GrabValue("Hello there"); if (r3 != "'SomeString' over 'System.Object' with 'Hello there'") throw new Exception(); } } class TestCovariantReturns { interface IFoo { } class Foo : IFoo { public readonly string State; public Foo(string state) => State = state; } class Base { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class Derived : Base { public override Foo GetFoo() => new Foo("Derived"); } class SuperDerived : Derived { public override Foo GetFoo() => new Foo("SuperDerived"); } class BaseWithUnusedVirtual { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class DerivedWithOverridenUnusedVirtual : BaseWithUnusedVirtual { public override Foo GetFoo() => new Foo("DerivedWithOverridenUnusedVirtual"); } class SuperDerivedWithOverridenUnusedVirtual : DerivedWithOverridenUnusedVirtual { public override Foo GetFoo() => new Foo("SuperDerivedWithOverridenUnusedVirtual"); } interface 
IInterfaceWithCovariantReturn { IFoo GetFoo(); } class ClassImplementingInterface : IInterfaceWithCovariantReturn { public virtual IFoo GetFoo() => throw new NotImplementedException(); } class DerivedClassImplementingInterface : ClassImplementingInterface { public override Foo GetFoo() => new Foo("DerivedClassImplementingInterface"); } public static void Run() { Console.WriteLine("Testing covariant returns..."); { Base b = new Derived(); if (((Foo)b.GetFoo()).State != "Derived") throw new Exception(); } { Base b = new SuperDerived(); if (((Foo)b.GetFoo()).State != "SuperDerived") throw new Exception(); } { Derived d = new SuperDerived(); if (d.GetFoo().State != "SuperDerived") throw new Exception(); } { DerivedWithOverridenUnusedVirtual b = new DerivedWithOverridenUnusedVirtual(); if (b.GetFoo().State != "DerivedWithOverridenUnusedVirtual") throw new Exception(); } { DerivedWithOverridenUnusedVirtual b = new SuperDerivedWithOverridenUnusedVirtual(); if (b.GetFoo().State != "SuperDerivedWithOverridenUnusedVirtual") throw new Exception(); } { IInterfaceWithCovariantReturn i = new DerivedClassImplementingInterface(); if (((Foo)i.GetFoo()).State != "DerivedClassImplementingInterface") throw new Exception(); } } } class TestVariantInterfaceOptimizations { static IEnumerable<Other> s_others = (IEnumerable<Other>)(object)new This[3] { (This)33, (This)66, (This)1 }; enum This : sbyte { } enum Other : sbyte { } sealed class MySealedClass { } interface IContravariantInterface<in T> { string DoContravariant(T value); } interface ICovariantInterface<out T> { T DoCovariant(object value); } class CoAndContravariantOverSealed : IContravariantInterface<object>, ICovariantInterface<MySealedClass> { public string DoContravariant(object value) => "Hello"; public MySealedClass DoCovariant(object value) => null; } public static void Run() { Console.WriteLine("Testing variant optimizations..."); int sum = 0; foreach (var other in s_others) { sum += (int)other; } if (sum != 100) throw new Exception(); ICovariantInterface<object> i1 = new CoAndContravariantOverSealed(); i1.DoCovariant(null); IContravariantInterface<MySealedClass> i2 = new CoAndContravariantOverSealed(); i2.DoContravariant(null); } } class TestDynamicInterfaceCastable { class CastableClass<TInterface, TImpl> : IDynamicInterfaceCastable { RuntimeTypeHandle IDynamicInterfaceCastable.GetInterfaceImplementation(RuntimeTypeHandle interfaceType) => interfaceType.Equals(typeof(TInterface).TypeHandle) ? 
typeof(TImpl).TypeHandle : default; bool IDynamicInterfaceCastable.IsInterfaceImplemented(RuntimeTypeHandle interfaceType, bool throwIfNotImplemented) => interfaceType.Equals(typeof(TInterface).TypeHandle); } interface IInterface { string GetCookie(); } [DynamicInterfaceCastableImplementation] interface IInterfaceCastableImpl : IInterface { string IInterface.GetCookie() => "IInterfaceCastableImpl"; } [DynamicInterfaceCastableImplementation] interface IInterfaceCastableImpl<T> : IInterface { string IInterface.GetCookie() => typeof(T).Name; } interface IInterfaceImpl : IInterface { string IInterface.GetCookie() => "IInterfaceImpl"; } [DynamicInterfaceCastableImplementation] interface IInterfaceIndirectCastableImpl : IInterfaceImpl { } public static void Run() { Console.WriteLine("Testing IDynamicInterfaceCastable..."); { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceCastableImpl>(); if (o.GetCookie() != "IInterfaceCastableImpl") throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceImpl>(); bool success = false; try { o.GetCookie(); } catch (InvalidOperationException) { success = true; } if (!success) throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceIndirectCastableImpl>(); if (o.GetCookie() != "IInterfaceImpl") throw new Exception(); } { IInterface o = (IInterface)new CastableClass<IInterface, IInterfaceCastableImpl<int>>(); if (o.GetCookie() != "Int32") throw new Exception(); } } } class TestStaticInterfaceMethodsAnalysis { interface IFoo { static abstract object Frob(); } class Foo<T> : IFoo { static object IFoo.Frob() => new Gen<T>(); } static object CallFrob<T>() where T : IFoo => T.Frob(); class Gen<T> { } struct Struct1 { } struct Struct2 { } public static void Run() { CallFrob<Foo<object>>(); Console.WriteLine(typeof(Foo<string>)); CallFrob<Foo<Struct1>>(); Console.WriteLine(typeof(Foo<Struct2>)); } } class TestStaticInterfaceMethods { interface ISimple { static abstract string GetCookie(); static abstract string GetCookieGeneric<T>(); } class SimpleClass : ISimple { public static string GetCookie() => "SimpleClass"; public static string GetCookieGeneric<T>() => $"SimpleClass.GetCookieGeneric<{typeof(T).Name}>"; } struct SimpleStruct : ISimple { public static string GetCookie() => "SimpleStruct"; public static string GetCookieGeneric<T>() => $"SimpleStruct.GetCookieGeneric<{typeof(T).Name}>"; } struct SimpleGenericStruct<T> : ISimple { public static string GetCookie() => $"SimpleGenericStruct<{typeof(T).Name}>"; public static string GetCookieGeneric<U>() => $"SimpleGenericStruct<{typeof(T).Name}>.GetCookieGeneric<{typeof(U).Name}>"; } class SimpleGenericClass<T> : ISimple { public static string GetCookie() => $"SimpleGenericClass<{typeof(T).Name}>"; public static string GetCookieGeneric<U>() => $"SimpleGenericClass<{typeof(T).Name}>.GetCookieGeneric<{typeof(U).Name}>"; } interface IVariant<in T> { static abstract string WhichMethod(T param); } class SimpleVariant : IVariant<Base> { public static string WhichMethod(Base b) => "SimpleVariant.WhichMethod(Base)"; } class SimpleVariantTwice : IVariant<Base>, IVariant<Mid> { public static string WhichMethod(Base b) => "SimpleVariantTwice.WhichMethod(Base)"; public static string WhichMethod(Mid b) => "SimpleVariantTwice.WhichMethod(Mid)"; } class VariantWithInheritanceBase : IVariant<Mid> { public static string WhichMethod(Mid b) => "VariantWithInheritanceBase.WhichMethod(Mid)"; } class VariantWithInheritanceDerived : 
VariantWithInheritanceBase, IVariant<Base> { public static string WhichMethod(Base b) => "VariantWithInheritanceDerived.WhichMethod(Base)"; } class GenericVariantWithInheritanceBase<T> : IVariant<T> { public static string WhichMethod(T b) => "GenericVariantWithInheritanceBase.WhichMethod(T)"; } class GenericVariantWithInheritanceDerived<T> : GenericVariantWithInheritanceBase<T>, IVariant<T> { public static new string WhichMethod(T b) => $"GenericVariantWithInheritanceDerived.WhichMethod({typeof(T).Name})"; } class GenericVariantWithHiddenBase : IVariant<Mid> { public static string WhichMethod(Mid b) => "GenericVariantWithHiddenBase.WhichMethod(Mid)"; } class GenericVariantWithHiddenDerived<T> : GenericVariantWithHiddenBase, IVariant<T> { public static string WhichMethod(T b) => $"GenericVariantWithHiddenDerived.WhichMethod({typeof(T).Name})"; } struct Struct { } class Base { } class Mid : Base { } class Derived : Mid { } static void TestSimpleInterface<T>(string expected) where T : ISimple { string actual = T.GetCookie(); if (actual != expected) { throw new Exception($"{actual} != {expected}"); } } static void TestSimpleInterfaceWithGenericMethod<T, U>(string expected) where T : ISimple { string actual = T.GetCookieGeneric<U>(); if (actual != expected) { throw new Exception($"{actual} != {expected}"); } } static void TestVariantInterface<T, U>(string expected) where T : IVariant<U> { string actual = T.WhichMethod(default); if (actual != expected) { throw new Exception($"{actual} != {expected}"); } } public static void Run() { TestSimpleInterface<SimpleClass>("SimpleClass"); TestSimpleInterface<SimpleStruct>("SimpleStruct"); TestSimpleInterface<SimpleGenericClass<Base>>("SimpleGenericClass<Base>"); TestSimpleInterface<SimpleGenericStruct<Base>>("SimpleGenericStruct<Base>"); TestSimpleInterfaceWithGenericMethod<SimpleClass, Base>("SimpleClass.GetCookieGeneric<Base>"); TestSimpleInterfaceWithGenericMethod<SimpleStruct, Base>("SimpleStruct.GetCookieGeneric<Base>"); TestSimpleInterfaceWithGenericMethod<SimpleClass, Struct>("SimpleClass.GetCookieGeneric<Struct>"); TestSimpleInterfaceWithGenericMethod<SimpleStruct, Struct>("SimpleStruct.GetCookieGeneric<Struct>"); TestSimpleInterfaceWithGenericMethod<SimpleGenericClass<Base>, Base>("SimpleGenericClass<Base>.GetCookieGeneric<Base>"); TestSimpleInterfaceWithGenericMethod<SimpleGenericStruct<Base>, Base>("SimpleGenericStruct<Base>.GetCookieGeneric<Base>"); TestSimpleInterfaceWithGenericMethod<SimpleGenericClass<Base>, Struct>("SimpleGenericClass<Base>.GetCookieGeneric<Struct>"); TestSimpleInterfaceWithGenericMethod<SimpleGenericStruct<Base>, Struct>("SimpleGenericStruct<Base>.GetCookieGeneric<Struct>"); TestVariantInterface<SimpleVariant, Base>("SimpleVariant.WhichMethod(Base)"); TestVariantInterface<SimpleVariant, Derived>("SimpleVariant.WhichMethod(Base)"); TestVariantInterface<SimpleVariantTwice, Base>("SimpleVariantTwice.WhichMethod(Base)"); TestVariantInterface<SimpleVariantTwice, Mid>("SimpleVariantTwice.WhichMethod(Mid)"); TestVariantInterface<SimpleVariantTwice, Derived>("SimpleVariantTwice.WhichMethod(Base)"); TestVariantInterface<VariantWithInheritanceDerived, Base>("VariantWithInheritanceDerived.WhichMethod(Base)"); TestVariantInterface<VariantWithInheritanceDerived, Mid>("VariantWithInheritanceDerived.WhichMethod(Base)"); TestVariantInterface<VariantWithInheritanceDerived, Derived>("VariantWithInheritanceDerived.WhichMethod(Base)"); TestVariantInterface<GenericVariantWithInheritanceDerived<Base>, 
Base>("GenericVariantWithInheritanceDerived.WhichMethod(Base)"); TestVariantInterface<GenericVariantWithInheritanceDerived<Base>, Mid>("GenericVariantWithInheritanceDerived.WhichMethod(Base)"); TestVariantInterface<GenericVariantWithInheritanceDerived<Mid>, Mid>("GenericVariantWithInheritanceDerived.WhichMethod(Mid)"); TestVariantInterface<GenericVariantWithHiddenDerived<Base>, Base>("GenericVariantWithHiddenDerived.WhichMethod(Base)"); TestVariantInterface<GenericVariantWithHiddenDerived<Base>, Mid>("GenericVariantWithHiddenDerived.WhichMethod(Base)"); TestVariantInterface<GenericVariantWithHiddenDerived<Mid>, Mid>("GenericVariantWithHiddenDerived.WhichMethod(Mid)"); TestVariantInterface<GenericVariantWithHiddenDerived<Derived>, Mid>("GenericVariantWithHiddenBase.WhichMethod(Mid)"); } } }
1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
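To make the description above concrete, here is a minimal C# sketch of the language feature being wired up: an interface with a `static abstract` member, implemented by a struct, and invoked through a constrained call on a generic type parameter — the path that `ResolveConstraintMethodApprox` resolves. All names here (`ITextParsable`, `Point`, `ParseAny`) are illustrative inventions, not identifiers from the PR.

using System;

// Hypothetical interface: a `static abstract` member must be supplied by
// every implementing type and is invoked via the type parameter itself,
// not via an instance.
interface ITextParsable<TSelf> where TSelf : ITextParsable<TSelf>
{
    static abstract TSelf Parse(string text);
}

struct Point : ITextParsable<Point>
{
    public int X, Y;

    // Static implementation of the static abstract interface member.
    public static Point Parse(string text)
    {
        string[] parts = text.Split(',');
        return new Point { X = int.Parse(parts[0]), Y = int.Parse(parts[1]) };
    }
}

static class Demo
{
    // `T.Parse(text)` compiles to a constrained call on the type parameter;
    // resolving such calls for static (not just instance) interface methods
    // is what the JitInterface hookup described above enables.
    static T ParseAny<T>(string text) where T : ITextParsable<T> => T.Parse(text);

    static void Main()
    {
        Point p = ParseAny<Point>("3,4");
        Console.WriteLine($"{p.X},{p.Y}"); // prints "3,4"
    }
}

Note the description's own caveats: creating a delegate to a static virtual method (e.g. a `Func<string, T>` bound to `T.Parse`) and `MakeGeneric` instantiations that need static virtuals at runtime are explicitly listed as not yet supported by this change.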
./src/libraries/Common/src/Interop/Windows/Crypt32/Interop.CertStoreSaveTo.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. internal static partial class Interop { internal static partial class Crypt32 { internal enum CertStoreSaveTo : int { CERT_STORE_SAVE_TO_MEMORY = 2 } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. internal static partial class Interop { internal static partial class Crypt32 { internal enum CertStoreSaveTo : int { CERT_STORE_SAVE_TO_MEMORY = 2 } } }
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/Microsoft.Extensions.DependencyModel/src/Resolution/ReferenceAssemblyPathResolver.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.IO; namespace Microsoft.Extensions.DependencyModel.Resolution { public class ReferenceAssemblyPathResolver: ICompilationAssemblyResolver { private readonly IFileSystem _fileSystem; private readonly string? _defaultReferenceAssembliesPath; private readonly string[] _fallbackSearchPaths; public ReferenceAssemblyPathResolver() : this(FileSystemWrapper.Default, EnvironmentWrapper.Default) { } public ReferenceAssemblyPathResolver(string? defaultReferenceAssembliesPath, string[] fallbackSearchPaths) : this(FileSystemWrapper.Default, defaultReferenceAssembliesPath, fallbackSearchPaths) { } internal ReferenceAssemblyPathResolver(IFileSystem fileSystem, IEnvironment environment) : this(fileSystem, GetDefaultReferenceAssembliesPath(fileSystem, environment), GetFallbackSearchPaths(fileSystem, environment)) { } internal ReferenceAssemblyPathResolver(IFileSystem fileSystem!!, string? defaultReferenceAssembliesPath, string[] fallbackSearchPaths!!) { _fileSystem = fileSystem; _defaultReferenceAssembliesPath = defaultReferenceAssembliesPath; _fallbackSearchPaths = fallbackSearchPaths; } public bool TryResolveAssemblyPaths(CompilationLibrary library!!, List<string>? assemblies) { if (!string.Equals(library.Type, "referenceassembly", StringComparison.OrdinalIgnoreCase)) { return false; } foreach (string assembly in library.Assemblies) { if (!TryResolveReferenceAssembly(assembly, out string? fullName)) { throw new InvalidOperationException(SR.Format(SR.ReferenceAssemblyNotFound, assembly, library.Name)); } assemblies?.Add(fullName); } return true; } private bool TryResolveReferenceAssembly(string path, [MaybeNullWhen(false)] out string fullPath) { fullPath = null; if (_defaultReferenceAssembliesPath != null) { string relativeToReferenceAssemblies = Path.Combine(_defaultReferenceAssembliesPath, path); if (_fileSystem.File.Exists(relativeToReferenceAssemblies)) { fullPath = relativeToReferenceAssemblies; return true; } } string name = Path.GetFileName(path); foreach (string fallbackPath in _fallbackSearchPaths) { string fallbackFile = Path.Combine(fallbackPath, name); if (_fileSystem.File.Exists(fallbackFile)) { fullPath = fallbackFile; return true; } } return false; } internal static string[] GetFallbackSearchPaths(IFileSystem fileSystem, IEnvironment environment) { if (!environment.IsWindows()) { return Array.Empty<string>(); } string? windir = environment.GetEnvironmentVariable("WINDIR"); if (windir == null) { return Array.Empty<string>(); } string net20Dir = Path.Combine(windir, "Microsoft.NET", "Framework", "v2.0.50727"); if (!fileSystem.Directory.Exists(net20Dir)) { return Array.Empty<string>(); } return new[] { net20Dir }; } internal static string? GetDefaultReferenceAssembliesPath(IFileSystem fileSystem, IEnvironment environment) { // Allow setting the reference assemblies path via an environment variable string? referenceAssembliesPath = DotNetReferenceAssembliesPathResolver.Resolve(environment, fileSystem); if (!string.IsNullOrEmpty(referenceAssembliesPath)) { return referenceAssembliesPath; } if (!environment.IsWindows()) { // There is no reference assemblies path outside of windows // The environment variable can be used to specify one return null; } // References assemblies are in %ProgramFiles(x86)% on // 64 bit machines string? 
programFiles = environment.GetEnvironmentVariable("ProgramFiles(x86)"); if (string.IsNullOrEmpty(programFiles)) { // On 32 bit machines they are in %ProgramFiles% programFiles = environment.GetEnvironmentVariable("ProgramFiles"); } if (string.IsNullOrEmpty(programFiles)) { // Reference assemblies aren't installed return null; } return Path.Combine( programFiles, "Reference Assemblies", "Microsoft", "Framework"); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.IO; namespace Microsoft.Extensions.DependencyModel.Resolution { public class ReferenceAssemblyPathResolver: ICompilationAssemblyResolver { private readonly IFileSystem _fileSystem; private readonly string? _defaultReferenceAssembliesPath; private readonly string[] _fallbackSearchPaths; public ReferenceAssemblyPathResolver() : this(FileSystemWrapper.Default, EnvironmentWrapper.Default) { } public ReferenceAssemblyPathResolver(string? defaultReferenceAssembliesPath, string[] fallbackSearchPaths) : this(FileSystemWrapper.Default, defaultReferenceAssembliesPath, fallbackSearchPaths) { } internal ReferenceAssemblyPathResolver(IFileSystem fileSystem, IEnvironment environment) : this(fileSystem, GetDefaultReferenceAssembliesPath(fileSystem, environment), GetFallbackSearchPaths(fileSystem, environment)) { } internal ReferenceAssemblyPathResolver(IFileSystem fileSystem!!, string? defaultReferenceAssembliesPath, string[] fallbackSearchPaths!!) { _fileSystem = fileSystem; _defaultReferenceAssembliesPath = defaultReferenceAssembliesPath; _fallbackSearchPaths = fallbackSearchPaths; } public bool TryResolveAssemblyPaths(CompilationLibrary library!!, List<string>? assemblies) { if (!string.Equals(library.Type, "referenceassembly", StringComparison.OrdinalIgnoreCase)) { return false; } foreach (string assembly in library.Assemblies) { if (!TryResolveReferenceAssembly(assembly, out string? fullName)) { throw new InvalidOperationException(SR.Format(SR.ReferenceAssemblyNotFound, assembly, library.Name)); } assemblies?.Add(fullName); } return true; } private bool TryResolveReferenceAssembly(string path, [MaybeNullWhen(false)] out string fullPath) { fullPath = null; if (_defaultReferenceAssembliesPath != null) { string relativeToReferenceAssemblies = Path.Combine(_defaultReferenceAssembliesPath, path); if (_fileSystem.File.Exists(relativeToReferenceAssemblies)) { fullPath = relativeToReferenceAssemblies; return true; } } string name = Path.GetFileName(path); foreach (string fallbackPath in _fallbackSearchPaths) { string fallbackFile = Path.Combine(fallbackPath, name); if (_fileSystem.File.Exists(fallbackFile)) { fullPath = fallbackFile; return true; } } return false; } internal static string[] GetFallbackSearchPaths(IFileSystem fileSystem, IEnvironment environment) { if (!environment.IsWindows()) { return Array.Empty<string>(); } string? windir = environment.GetEnvironmentVariable("WINDIR"); if (windir == null) { return Array.Empty<string>(); } string net20Dir = Path.Combine(windir, "Microsoft.NET", "Framework", "v2.0.50727"); if (!fileSystem.Directory.Exists(net20Dir)) { return Array.Empty<string>(); } return new[] { net20Dir }; } internal static string? GetDefaultReferenceAssembliesPath(IFileSystem fileSystem, IEnvironment environment) { // Allow setting the reference assemblies path via an environment variable string? referenceAssembliesPath = DotNetReferenceAssembliesPathResolver.Resolve(environment, fileSystem); if (!string.IsNullOrEmpty(referenceAssembliesPath)) { return referenceAssembliesPath; } if (!environment.IsWindows()) { // There is no reference assemblies path outside of windows // The environment variable can be used to specify one return null; } // References assemblies are in %ProgramFiles(x86)% on // 64 bit machines string? 
programFiles = environment.GetEnvironmentVariable("ProgramFiles(x86)"); if (string.IsNullOrEmpty(programFiles)) { // On 32 bit machines they are in %ProgramFiles% programFiles = environment.GetEnvironmentVariable("ProgramFiles"); } if (string.IsNullOrEmpty(programFiles)) { // Reference assemblies aren't installed return null; } return Path.Combine( programFiles, "Reference Assemblies", "Microsoft", "Framework"); } } }
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/Regression/Dev11/dev11_4421/Dev11_4421.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; internal static class Module { private static int Main() { int Var1, Temp; try { checked { for (Temp = int.MaxValue - 3; Temp <= int.MaxValue - 1; Temp++) Var1 = (int)(2 + Temp); } } catch (Exception ex) { Console.WriteLine("Expected Overflow Error: " + ex.ToString()); return 100; } return -1; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; internal static class Module { private static int Main() { int Var1, Temp; try { checked { for (Temp = int.MaxValue - 3; Temp <= int.MaxValue - 1; Temp++) Var1 = (int)(2 + Temp); } } catch (Exception ex) { Console.WriteLine("Expected Overflow Error: " + ex.ToString()); return 100; } return -1; } }
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.ComponentModel.TypeConverter/tests/Design/DesignerVerbCollectionTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Linq; using Xunit; namespace System.ComponentModel.Design.Tests { public class DesignerVerbCollectionTests { [Fact] public void Ctor_Default() { var collection = new DesignerVerbCollection(); Assert.Equal(0, collection.Count); Assert.Empty(collection); } [Fact] public void Ctor_Value() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(value); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void Ctor_NullValue_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("value", () => new DesignerVerbCollection(null)); } [Fact] public void Add_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb, null }; Assert.Equal(2, collection.Count); Assert.Same(verb, collection[0]); Assert.Null(collection[1]); } [Fact] public void Insert_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { new DesignerVerb("Text", null) }; collection.Insert(0, verb); Assert.Equal(2, collection.Count); Assert.Same(verb, collection[0]); collection.Insert(0, null); Assert.Equal(3, collection.Count); Assert.Null(collection[0]); } [Fact] public void Remove_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb }; collection.Remove(verb); Assert.Empty(collection); collection.Add(null); collection.Remove(null); Assert.Empty(collection); } [Fact] public void Item_SetValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { new DesignerVerb("Text", null) }; collection[0] = verb; Assert.Equal(1, collection.Count); Assert.Same(verb, collection[0]); collection[0] = null; Assert.Null(collection[0]); } [Fact] public void AddRange_DesignerVerbArray_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(); collection.AddRange(value); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_DesignerVerbCollection_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(); collection.AddRange(new DesignerVerbCollection(value)); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_ThisDesignerVerbCollection_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(value); collection.AddRange(collection); Assert.Equal(4, collection.Count); Assert.Equal(value.Concat(value), collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_NullValue_ThrowsArgumentNullException() { var collection = new DesignerVerbCollection(); AssertExtensions.Throws<ArgumentNullException>("value", () => collection.AddRange((DesignerVerb[])null)); AssertExtensions.Throws<ArgumentNullException>("value", () => collection.AddRange((DesignerVerbCollection)null)); } [Fact] public void Contains_Value_ReturnsExpected() { var verb = new DesignerVerb("Text", null); var collection = new 
DesignerVerbCollection { verb }; Assert.True(collection.Contains(verb)); Assert.False(collection.Contains(new DesignerVerb("Text", null))); Assert.False(collection.Contains(null)); } [Fact] public void IndexOf_Value_ReturnsExpected() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb }; Assert.Equal(0, collection.IndexOf(verb)); Assert.Equal(-1, collection.IndexOf(new DesignerVerb("Text", null))); Assert.Equal(-1, collection.IndexOf(null)); } [Fact] public void CopyTo_ValidDestination_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb, verb }; var destination = new DesignerVerb[3]; collection.CopyTo(destination, 1); Assert.Equal(new DesignerVerb[] { null, verb, verb }, destination); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Linq; using Xunit; namespace System.ComponentModel.Design.Tests { public class DesignerVerbCollectionTests { [Fact] public void Ctor_Default() { var collection = new DesignerVerbCollection(); Assert.Equal(0, collection.Count); Assert.Empty(collection); } [Fact] public void Ctor_Value() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(value); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void Ctor_NullValue_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("value", () => new DesignerVerbCollection(null)); } [Fact] public void Add_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb, null }; Assert.Equal(2, collection.Count); Assert.Same(verb, collection[0]); Assert.Null(collection[1]); } [Fact] public void Insert_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { new DesignerVerb("Text", null) }; collection.Insert(0, verb); Assert.Equal(2, collection.Count); Assert.Same(verb, collection[0]); collection.Insert(0, null); Assert.Equal(3, collection.Count); Assert.Null(collection[0]); } [Fact] public void Remove_ValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb }; collection.Remove(verb); Assert.Empty(collection); collection.Add(null); collection.Remove(null); Assert.Empty(collection); } [Fact] public void Item_SetValidValue_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { new DesignerVerb("Text", null) }; collection[0] = verb; Assert.Equal(1, collection.Count); Assert.Same(verb, collection[0]); collection[0] = null; Assert.Null(collection[0]); } [Fact] public void AddRange_DesignerVerbArray_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(); collection.AddRange(value); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_DesignerVerbCollection_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(); collection.AddRange(new DesignerVerbCollection(value)); Assert.Equal(2, collection.Count); Assert.Equal(value, collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_ThisDesignerVerbCollection_Success() { var value = new DesignerVerb[] { new DesignerVerb("Text", null), new DesignerVerb("Text", null) }; var collection = new DesignerVerbCollection(value); collection.AddRange(collection); Assert.Equal(4, collection.Count); Assert.Equal(value.Concat(value), collection.Cast<DesignerVerb>()); } [Fact] public void AddRange_NullValue_ThrowsArgumentNullException() { var collection = new DesignerVerbCollection(); AssertExtensions.Throws<ArgumentNullException>("value", () => collection.AddRange((DesignerVerb[])null)); AssertExtensions.Throws<ArgumentNullException>("value", () => collection.AddRange((DesignerVerbCollection)null)); } [Fact] public void Contains_Value_ReturnsExpected() { var verb = new DesignerVerb("Text", null); var collection = new 
DesignerVerbCollection { verb }; Assert.True(collection.Contains(verb)); Assert.False(collection.Contains(new DesignerVerb("Text", null))); Assert.False(collection.Contains(null)); } [Fact] public void IndexOf_Value_ReturnsExpected() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb }; Assert.Equal(0, collection.IndexOf(verb)); Assert.Equal(-1, collection.IndexOf(new DesignerVerb("Text", null))); Assert.Equal(-1, collection.IndexOf(null)); } [Fact] public void CopyTo_ValidDestination_Success() { var verb = new DesignerVerb("Text", null); var collection = new DesignerVerbCollection { verb, verb }; var destination = new DesignerVerb[3]; collection.CopyTo(destination, 1); Assert.Equal(new DesignerVerb[] { null, verb, verb }, destination); } } }
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.DirectoryServices/tests/System/DirectoryServices/DirectorySynchronizationTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using Xunit; namespace System.DirectoryServices.Tests { public class DirectorySynchronizationTests { [Fact] public void Ctor_Default() { var synchronization = new DirectorySynchronization(); Assert.Equal(DirectorySynchronizationOptions.None, synchronization.Option); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(DirectorySynchronizationOptions.None)] [InlineData(DirectorySynchronizationOptions.IncrementalValues | DirectorySynchronizationOptions.ObjectSecurity)] public void Ctor_Option(DirectorySynchronizationOptions option) { var synchronization = new DirectorySynchronization(option); Assert.Equal(option, synchronization.Option); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(null)] [InlineData(new byte[0])] [InlineData(new byte[] { 1, 2, 3, })] public void Ctor_Cookie(byte[] cookie) { var synchronization = new DirectorySynchronization(cookie); Assert.Equal(DirectorySynchronizationOptions.None, synchronization.Option); byte[] synchronizationCookie = synchronization.GetDirectorySynchronizationCookie(); Assert.NotSame(synchronizationCookie, cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronizationCookie); } [Theory] [InlineData(DirectorySynchronizationOptions.None, null)] [InlineData(DirectorySynchronizationOptions.IncrementalValues, new byte[0])] [InlineData(DirectorySynchronizationOptions.IncrementalValues | DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 })] public void Ctor_Option_Cookie(DirectorySynchronizationOptions option, byte[] cookie) { var synchronization = new DirectorySynchronization(option, cookie); Assert.Equal(option, synchronization.Option); byte[] synchronizationCookie = synchronization.GetDirectorySynchronizationCookie(); Assert.NotSame(synchronizationCookie, cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronizationCookie); } [Theory] [InlineData((DirectorySynchronizationOptions)(-1))] [InlineData((DirectorySynchronizationOptions)int.MaxValue)] public void Ctor_InvalidOption_ThrowsInvalidEnumArgumentException(DirectorySynchronizationOptions options) { AssertExtensions.Throws<InvalidEnumArgumentException>("value", () => new DirectorySynchronization(options)); AssertExtensions.Throws<InvalidEnumArgumentException>("value", () => new DirectorySynchronization(options, new byte[0])); } public static IEnumerable<object[]> Ctor_Synchronization_TestData() { yield return new object[] { null }; yield return new object[] { new DirectorySynchronization(DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 }) }; } [Theory] [MemberData(nameof(Ctor_Synchronization_TestData))] public void Ctor_Synchronization(DirectorySynchronization otherSynchronization) { var synchronization = new DirectorySynchronization(otherSynchronization); Assert.Equal(otherSynchronization?.Option ?? DirectorySynchronizationOptions.None, synchronization.Option); Assert.Equal(otherSynchronization?.GetDirectorySynchronizationCookie() ?? 
Array.Empty<byte>(), synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void Copy_Invoke_ReturnsExpected() { var synchronization = new DirectorySynchronization(DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 }); DirectorySynchronization copy = synchronization.Copy(); Assert.NotSame(synchronization, copy); Assert.Equal(DirectorySynchronizationOptions.ObjectSecurity, synchronization.Option); Assert.Equal(new byte[] { 1, 2, 3 }, synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void ResetDirectorySynchronizationCookie_Parameterless_SetsToEmpty() { var synchronization = new DirectorySynchronization(new byte[] { 1, 2, 3 }); synchronization.ResetDirectorySynchronizationCookie(); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(null)] [InlineData(new byte[0])] [InlineData(new byte[] { 1, 2, 3 })] public void ResetDirectorySynchronizationCookie_Cookie_SetsToEmpty(byte[] cookie) { var synchronization = new DirectorySynchronization(new byte[] { 255, 255, 255 }); synchronization.ResetDirectorySynchronizationCookie(cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void ResetDirectorySynchronizationCookie_Cookie_MakesCopyOfCookie() { var cookie = new byte[] { 1, 2, 3 }; var synchronization = new DirectorySynchronization(); synchronization.ResetDirectorySynchronizationCookie(cookie); cookie[0] = 20; Assert.Equal(new byte[] { 1, 2, 3 }, synchronization.GetDirectorySynchronizationCookie()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using Xunit; namespace System.DirectoryServices.Tests { public class DirectorySynchronizationTests { [Fact] public void Ctor_Default() { var synchronization = new DirectorySynchronization(); Assert.Equal(DirectorySynchronizationOptions.None, synchronization.Option); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(DirectorySynchronizationOptions.None)] [InlineData(DirectorySynchronizationOptions.IncrementalValues | DirectorySynchronizationOptions.ObjectSecurity)] public void Ctor_Option(DirectorySynchronizationOptions option) { var synchronization = new DirectorySynchronization(option); Assert.Equal(option, synchronization.Option); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(null)] [InlineData(new byte[0])] [InlineData(new byte[] { 1, 2, 3, })] public void Ctor_Cookie(byte[] cookie) { var synchronization = new DirectorySynchronization(cookie); Assert.Equal(DirectorySynchronizationOptions.None, synchronization.Option); byte[] synchronizationCookie = synchronization.GetDirectorySynchronizationCookie(); Assert.NotSame(synchronizationCookie, cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronizationCookie); } [Theory] [InlineData(DirectorySynchronizationOptions.None, null)] [InlineData(DirectorySynchronizationOptions.IncrementalValues, new byte[0])] [InlineData(DirectorySynchronizationOptions.IncrementalValues | DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 })] public void Ctor_Option_Cookie(DirectorySynchronizationOptions option, byte[] cookie) { var synchronization = new DirectorySynchronization(option, cookie); Assert.Equal(option, synchronization.Option); byte[] synchronizationCookie = synchronization.GetDirectorySynchronizationCookie(); Assert.NotSame(synchronizationCookie, cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronizationCookie); } [Theory] [InlineData((DirectorySynchronizationOptions)(-1))] [InlineData((DirectorySynchronizationOptions)int.MaxValue)] public void Ctor_InvalidOption_ThrowsInvalidEnumArgumentException(DirectorySynchronizationOptions options) { AssertExtensions.Throws<InvalidEnumArgumentException>("value", () => new DirectorySynchronization(options)); AssertExtensions.Throws<InvalidEnumArgumentException>("value", () => new DirectorySynchronization(options, new byte[0])); } public static IEnumerable<object[]> Ctor_Synchronization_TestData() { yield return new object[] { null }; yield return new object[] { new DirectorySynchronization(DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 }) }; } [Theory] [MemberData(nameof(Ctor_Synchronization_TestData))] public void Ctor_Synchronization(DirectorySynchronization otherSynchronization) { var synchronization = new DirectorySynchronization(otherSynchronization); Assert.Equal(otherSynchronization?.Option ?? DirectorySynchronizationOptions.None, synchronization.Option); Assert.Equal(otherSynchronization?.GetDirectorySynchronizationCookie() ?? 
Array.Empty<byte>(), synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void Copy_Invoke_ReturnsExpected() { var synchronization = new DirectorySynchronization(DirectorySynchronizationOptions.ObjectSecurity, new byte[] { 1, 2, 3 }); DirectorySynchronization copy = synchronization.Copy(); Assert.NotSame(synchronization, copy); Assert.Equal(DirectorySynchronizationOptions.ObjectSecurity, synchronization.Option); Assert.Equal(new byte[] { 1, 2, 3 }, synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void ResetDirectorySynchronizationCookie_Parameterless_SetsToEmpty() { var synchronization = new DirectorySynchronization(new byte[] { 1, 2, 3 }); synchronization.ResetDirectorySynchronizationCookie(); Assert.Empty(synchronization.GetDirectorySynchronizationCookie()); } [Theory] [InlineData(null)] [InlineData(new byte[0])] [InlineData(new byte[] { 1, 2, 3 })] public void ResetDirectorySynchronizationCookie_Cookie_SetsToEmpty(byte[] cookie) { var synchronization = new DirectorySynchronization(new byte[] { 255, 255, 255 }); synchronization.ResetDirectorySynchronizationCookie(cookie); Assert.Equal(cookie ?? Array.Empty<byte>(), synchronization.GetDirectorySynchronizationCookie()); } [Fact] public void ResetDirectorySynchronizationCookie_Cookie_MakesCopyOfCookie() { var cookie = new byte[] { 1, 2, 3 }; var synchronization = new DirectorySynchronization(); synchronization.ResetDirectorySynchronizationCookie(cookie); cookie[0] = 20; Assert.Equal(new byte[] { 1, 2, 3 }, synchronization.GetDirectorySynchronizationCookie()); } } }
-1
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/TransposeOdd.Vector128.UInt16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void TransposeOdd_Vector128_UInt16() { var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt16> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 testClass) { var result = AdvSimd.Arm64.TransposeOdd(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 testClass) { fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector128<UInt16> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector128<UInt16> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.TransposeOdd( Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.TransposeOdd), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.TransposeOdd), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.TransposeOdd( _clsVar1, _clsVar2 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.TransposeOdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.TransposeOdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); var result = AdvSimd.Arm64.TransposeOdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); fixed (Vector128<UInt16>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.TransposeOdd(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.TransposeOdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt16> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; int index = 0; int half = RetElementCount / 2; for (var i = 0; i < RetElementCount; i+=2, index++) { if (result[index] != left[i+1] || result[++index] != right[i+1]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.TransposeOdd)}<UInt16>(Vector128<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void TransposeOdd_Vector128_UInt16() { var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt16> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 testClass) { var result = AdvSimd.Arm64.TransposeOdd(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16 testClass) { fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector128<UInt16> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector128<UInt16> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.TransposeOdd( Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.TransposeOdd), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.TransposeOdd), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.TransposeOdd( _clsVar1, _clsVar2 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.TransposeOdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.TransposeOdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); var result = AdvSimd.Arm64.TransposeOdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__TransposeOdd_Vector128_UInt16(); fixed (Vector128<UInt16>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.TransposeOdd(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.TransposeOdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.TransposeOdd( AdvSimd.LoadVector128((UInt16*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt16> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; int index = 0; int half = RetElementCount / 2; for (var i = 0; i < RetElementCount; i+=2, index++) { if (result[index] != left[i+1] || result[++index] != right[i+1]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.TransposeOdd)}<UInt16>(Vector128<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
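The ValidateResult loop in the TransposeOdd record above asserts result[2k] == left[2k+1] and result[2k+1] == right[2k+1]. As a reading aid, here is a minimal scalar model of that TRN2-style behavior; ScalarTransposeOdd is an illustrative name invented for this sketch and is not part of the test library.

    static class TransposeOddSketch
    {
        // Scalar model of AdvSimd.Arm64.TransposeOdd over UInt16 lanes:
        // odd-indexed lanes of `left` fill the even result slots, and the
        // matching odd-indexed lanes of `right` fill the odd result slots.
        public static ushort[] ScalarTransposeOdd(ushort[] left, ushort[] right)
        {
            var result = new ushort[left.Length];
            for (int i = 0; i < left.Length; i += 2)
            {
                result[i] = left[i + 1];      // checked as result[index] == left[i+1]
                result[i + 1] = right[i + 1]; // checked as result[++index] == right[i+1]
            }
            return result;
        }
    }

Incidentally, the `int half = RetElementCount / 2;` local in the test's validation loop is never used; it appears to be left over from a shared template.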
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/nativeaot/System.Private.Reflection.Core/src/System/Reflection/Runtime/General/BlockedRuntimeTypeNameGenerator.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Text; using System.Diagnostics; using System.Collections.Concurrent; namespace System.Reflection.Runtime.General { // // This class dispenses randomized strings (that serve as both the fake name and fake assembly container) for // reflection-blocked types. // // The names are randomized to prevent apps from hard-wiring dependencies on them or attempting to serialize them // across app execution. // internal static class BlockedRuntimeTypeNameGenerator { public static string GetNameForBlockedRuntimeType(RuntimeTypeHandle typeHandle) { string name = s_blockedNameTable.GetOrAdd(new RuntimeTypeHandleKey(typeHandle)); return name; } private sealed class BlockedRuntimeTypeNameTable : ConcurrentUnifier<RuntimeTypeHandleKey, string> { protected sealed override string Factory(RuntimeTypeHandleKey key) { uint count = s_counter++; return $"$BlockedFromReflection_{count}_{Guid.NewGuid().ToString().Substring(0, 8)}"; } private static uint s_counter; } private static readonly BlockedRuntimeTypeNameTable s_blockedNameTable = new BlockedRuntimeTypeNameTable(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Text; using System.Diagnostics; using System.Collections.Concurrent; namespace System.Reflection.Runtime.General { // // This class dispenses randomized strings (that serve as both the fake name and fake assembly container) for // reflection-blocked types. // // The names are randomized to prevent apps from hard-wiring dependencies on them or attempting to serialize them // across app execution. // internal static class BlockedRuntimeTypeNameGenerator { public static string GetNameForBlockedRuntimeType(RuntimeTypeHandle typeHandle) { string name = s_blockedNameTable.GetOrAdd(new RuntimeTypeHandleKey(typeHandle)); return name; } private sealed class BlockedRuntimeTypeNameTable : ConcurrentUnifier<RuntimeTypeHandleKey, string> { protected sealed override string Factory(RuntimeTypeHandleKey key) { uint count = s_counter++; return $"$BlockedFromReflection_{count}_{Guid.NewGuid().ToString().Substring(0, 8)}"; } private static uint s_counter; } private static readonly BlockedRuntimeTypeNameTable s_blockedNameTable = new BlockedRuntimeTypeNameTable(); } }
-1
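ConcurrentUnifier<K, V> in the record above is an internal memoizing cache. A rough public-API equivalent of the same get-or-create-once pattern, sketched with ConcurrentDictionary under the assumption that one stable name per type handle is all that matters:

    using System;
    using System.Collections.Concurrent;
    using System.Threading;

    static class BlockedNameSketch
    {
        private static int s_counter;
        private static readonly ConcurrentDictionary<RuntimeTypeHandle, string> s_names = new();

        // Each handle maps to exactly one randomized name for the process lifetime.
        public static string GetName(RuntimeTypeHandle handle) =>
            s_names.GetOrAdd(handle, static _ =>
            {
                uint count = (uint)Interlocked.Increment(ref s_counter);
                return $"$BlockedFromReflection_{count}_{Guid.NewGuid().ToString().Substring(0, 8)}";
            });
    }

One behavioral difference from a true unifier: GetOrAdd can invoke the factory more than once under contention, so the counter may skip values, although every caller still observes the single name that won the race.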
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.Private.Xml/src/System/Xml/Core/LocalAppContextSwitches.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; namespace System { internal static partial class LocalAppContextSwitches { private static int s_dontThrowOnInvalidSurrogatePairs; public static bool DontThrowOnInvalidSurrogatePairs { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.DontThrowOnInvalidSurrogatePairs", ref s_dontThrowOnInvalidSurrogatePairs); } } private static int s_ignoreEmptyKeySequences; public static bool IgnoreEmptyKeySequences { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.IgnoreEmptyKeySequencess", ref s_ignoreEmptyKeySequences); } } private static int s_ignoreKindInUtcTimeSerialization; public static bool IgnoreKindInUtcTimeSerialization { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.IgnoreKindInUtcTimeSerialization", ref s_ignoreKindInUtcTimeSerialization); } } private static int s_limitXPathComplexity; public static bool LimitXPathComplexity { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.LimitXPathComplexity", ref s_limitXPathComplexity); } } private static int s_allowDefaultResolver; public static bool AllowDefaultResolver { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.AllowDefaultResolver", ref s_allowDefaultResolver); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; namespace System { internal static partial class LocalAppContextSwitches { private static int s_dontThrowOnInvalidSurrogatePairs; public static bool DontThrowOnInvalidSurrogatePairs { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.DontThrowOnInvalidSurrogatePairs", ref s_dontThrowOnInvalidSurrogatePairs); } } private static int s_ignoreEmptyKeySequences; public static bool IgnoreEmptyKeySequences { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.IgnoreEmptyKeySequencess", ref s_ignoreEmptyKeySequences); } } private static int s_ignoreKindInUtcTimeSerialization; public static bool IgnoreKindInUtcTimeSerialization { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.IgnoreKindInUtcTimeSerialization", ref s_ignoreKindInUtcTimeSerialization); } } private static int s_limitXPathComplexity; public static bool LimitXPathComplexity { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.LimitXPathComplexity", ref s_limitXPathComplexity); } } private static int s_allowDefaultResolver; public static bool AllowDefaultResolver { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return GetCachedSwitchValue("Switch.System.Xml.AllowDefaultResolver", ref s_allowDefaultResolver); } } } }
-1
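Each property in the LocalAppContextSwitches record above reads an AppContext switch once and caches the result in its backing int field. A small usage sketch; the particular switch chosen here is illustrative:

    using System;

    class Program
    {
        static void Main()
        {
            // Must run before System.Xml first reads the switch, because the
            // cached value is never re-queried after the initial lookup.
            AppContext.SetSwitch("Switch.System.Xml.AllowDefaultResolver", true);
        }
    }

The same switch can also be set declaratively in runtimeconfig.json under "configProperties", which sidesteps the ordering concern entirely.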
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/Xor.Vector128.UInt16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void Xor_Vector128_UInt16() { var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__Xor_Vector128_UInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 
2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt16> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__Xor_Vector128_UInt16 testClass) { var result = AdvSimd.Xor(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__Xor_Vector128_UInt16 testClass) { fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector128<UInt16> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector128<UInt16> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__Xor_Vector128_UInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar1), ref 
Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__Xor_Vector128_UInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Xor( Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Xor), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Xor), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Xor( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.Xor(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Xor(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); var result = AdvSimd.Xor(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); fixed (Vector128<UInt16>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Xor(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Xor(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void 
RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt16> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Xor(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Xor)}<UInt16>(Vector128<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void Xor_Vector128_UInt16() { var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__Xor_Vector128_UInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 
2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt16> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__Xor_Vector128_UInt16 testClass) { var result = AdvSimd.Xor(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__Xor_Vector128_UInt16 testClass) { fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector128<UInt16> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector128<UInt16> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__Xor_Vector128_UInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar1), ref 
Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__Xor_Vector128_UInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Xor( Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Xor), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Xor), new Type[] { typeof(Vector128<UInt16>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Xor( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.Xor(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Xor(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); var result = AdvSimd.Xor(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__Xor_Vector128_UInt16(); fixed (Vector128<UInt16>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Xor(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt16>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Xor(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Xor( AdvSimd.LoadVector128((UInt16*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void 
RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt16> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Xor(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Xor)}<UInt16>(Vector128<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
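A note on the validation pattern in the Xor test above: every scenario funnels into an element-wise scalar check against Helpers.Xor. The Helpers class is not included in this dump, so the following is a minimal sketch of what that comparison presumably reduces to, assuming Helpers.Xor is a plain scalar xor; ValidateXor is an invented name.

// Hedged sketch of the per-lane check behind ValidateResult in the Xor test.
// Assumption: Helpers.Xor(a, b) == (ushort)(a ^ b); the real Helpers source is not shown here.
static bool ValidateXor(ushort[] left, ushort[] right, ushort[] result)
{
    for (int i = 0; i < result.Length; i++)
    {
        // Vector128<UInt16> xor is lane-wise, so each output lane must equal left[i] ^ right[i].
        if ((ushort)(left[i] ^ right[i]) != result[i])
        {
            return false;
        }
    }
    return true;
}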
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AddHighNarrowingLower.Vector64.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddHighNarrowingLower_Vector64_Int32() { var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || 
(alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector128<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 testClass) { var result = AdvSimd.AddHighNarrowingLower(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector128<Int64> _clsVar2; private Vector128<Int64> _fld1; private Vector128<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddHighNarrowingLower( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingLower), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingLower), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AddHighNarrowingLower( _clsVar1, _clsVar2 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector128<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector128((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.AddHighNarrowingLower(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.AddHighNarrowingLower(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); var result = AdvSimd.AddHighNarrowingLower(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector128<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddHighNarrowingLower(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddHighNarrowingLower(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(&test._fld1)), 
AdvSimd.LoadVector128((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddHighNarrowing(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddHighNarrowingLower)}<Int32>(Vector128<Int64>, Vector128<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
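The Helpers.AddHighNarrowing check in the ValidateResult above is presumably the scalar mirror of the AArch64 ADDHN semantics: add the two Int64 lanes with wrap-around and keep only the high 32 bits of each sum. A sketch under that assumption (the real Helpers class is not part of this dump):

// Hedged scalar reference for AddHighNarrowingLower on Int64 lanes.
static int AddHighNarrowing(long left, long right)
{
    // Wrap-around 64-bit addition, then narrow by taking bits 63..32 of the sum.
    ulong sum = unchecked((ulong)left + (ulong)right);
    return (int)(sum >> 32);
}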
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddHighNarrowingLower_Vector64_Int32() { var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || 
(alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector128<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 testClass) { var result = AdvSimd.AddHighNarrowingLower(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector128<Int64> _clsVar2; private Vector128<Int64> _fld1; private Vector128<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddHighNarrowingLower( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingLower), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingLower), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AddHighNarrowingLower( _clsVar1, _clsVar2 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector128<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector128((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.AddHighNarrowingLower(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.AddHighNarrowingLower(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); var result = AdvSimd.AddHighNarrowingLower(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AddHighNarrowingLower_Vector64_Int32(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector128<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddHighNarrowingLower(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddHighNarrowingLower(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AddHighNarrowingLower( AdvSimd.LoadVector128((Int64*)(&test._fld1)), 
AdvSimd.LoadVector128((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddHighNarrowing(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddHighNarrowingLower)}<Int32>(Vector128<Int64>, Vector128<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
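The PR metadata in these rows describes compiler/runtime support for static virtual (static abstract) interface members and constrained calls to them. Purely as an illustration of the language feature, not code from the PR, and with all names invented:

// Illustrative only: a static abstract interface member and a constrained call site.
interface IAddable<TSelf> where TSelf : IAddable<TSelf>
{
    static abstract TSelf Add(TSelf left, TSelf right);
}

struct Meters : IAddable<Meters>
{
    public double Value;
    public static Meters Add(Meters l, Meters r) => new Meters { Value = l.Value + r.Value };
}

static class Demo
{
    // T.Add(...) is a constrained call on a static virtual method; resolving such
    // calls is what the ResolveConstraintMethodApprox hook mentioned above handles.
    static T Sum<T>(T a, T b) where T : IAddable<T> => T.Add(a, b);
}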
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063, cleaned them up, and added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox, using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently being emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AddPairwiseWideningAndAdd.Vector128.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddPairwiseWideningAndAdd_Vector128_SByte() { var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, SByte[] inArray2, Int16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < 
sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int16> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte testClass) { var result = AdvSimd.AddPairwiseWideningAndAdd(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte testClass) { fixed (Vector128<Int16>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<Int16> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<Int16> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte() { for (var i = 0; i < Op1ElementCount; i++) { 
_data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddPairwiseWideningAndAdd( Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWideningAndAdd), new Type[] { typeof(Vector128<Int16>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWideningAndAdd), new Type[] { typeof(Vector128<Int16>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
AdvSimd.AddPairwiseWideningAndAdd( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int16>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = AdvSimd.AddPairwiseWideningAndAdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var result = AdvSimd.AddPairwiseWideningAndAdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); var result = AdvSimd.AddPairwiseWideningAndAdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); fixed (Vector128<Int16>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddPairwiseWideningAndAdd(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int16>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWideningAndAdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int16> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int16[] left, SByte[] right, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddPairwiseWideningAndAdd(left, right, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddPairwiseWideningAndAdd)}<Int16>(Vector128<Int16>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
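Likewise, Helpers.AddPairwiseWideningAndAdd(left, right, i) in the check above presumably follows the AArch64 SADALP shape: widen and sum each adjacent pair of SByte lanes, then accumulate into the corresponding Int16 lane with wrap-around. A sketch under that assumption (Helpers is not shown in this dump):

// Hedged scalar reference for AddPairwiseWideningAndAdd (SADALP-style pairwise accumulate).
static short AddPairwiseWideningAndAdd(short[] acc, sbyte[] pairs, int i)
{
    int pairSum = pairs[2 * i] + pairs[2 * i + 1];  // widen the sbyte pair before adding
    return unchecked((short)(acc[i] + pairSum));    // 16-bit wrap-around accumulate
}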
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddPairwiseWideningAndAdd_Vector128_SByte() { var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, SByte[] inArray2, Int16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < 
sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int16> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte testClass) { var result = AdvSimd.AddPairwiseWideningAndAdd(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte testClass) { fixed (Vector128<Int16>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<Int16> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<Int16> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte() { for (var i = 0; i < Op1ElementCount; i++) { 
_data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddPairwiseWideningAndAdd( Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWideningAndAdd), new Type[] { typeof(Vector128<Int16>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWideningAndAdd), new Type[] { typeof(Vector128<Int16>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
AdvSimd.AddPairwiseWideningAndAdd( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int16>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = AdvSimd.AddPairwiseWideningAndAdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var result = AdvSimd.AddPairwiseWideningAndAdd(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); var result = AdvSimd.AddPairwiseWideningAndAdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AddPairwiseWideningAndAdd_Vector128_SByte(); fixed (Vector128<Int16>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddPairwiseWideningAndAdd(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int16>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWideningAndAdd(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.AddPairwiseWideningAndAdd( AdvSimd.LoadVector128((Int16*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int16> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int16[] left, SByte[] right, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddPairwiseWideningAndAdd(left, right, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddPairwiseWideningAndAdd)}<Int16>(Vector128<Int16>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
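The AddPairwiseWideningAndAdd record above validates results against a shared `Helpers` class that is not included in this record. A minimal scalar model of the per-element rule the test appears to check (ARM SADALP: widen adjacent `sbyte` pairs, accumulate into the `short` addend); the helper's exact name, signature, and truncation behavior are assumptions, not taken from this file:

```csharp
// Assumed scalar model of Helpers.AddPairwiseWideningAndAdd(short[], sbyte[], int):
// result[i] = addend[i] + value[2i] + value[2i+1], truncated to 16 bits (SADALP semantics).
static short AddPairwiseWideningAndAdd(short[] addend, sbyte[] value, int i)
    => (short)(addend[i] + value[2 * i] + value[2 * i + 1]);
```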
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/Regression/CLR-x86-JIT/v2.1/b609280/b609280.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
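The PR description in the record above (static virtual methods, constrained calls resolved via `ResolveConstraintMethodApprox` and `ConstrainedMethodUseLookupResult`) refers to C#'s static abstract interface members. A minimal, hypothetical sketch of what such a call looks like at the language level; the interface and type names here are illustrative, not from the PR:

```csharp
// Illustrative only: IZero and Meters are made-up names.
interface IZero<TSelf> where TSelf : IZero<TSelf>
{
    static abstract TSelf Zero { get; }   // a "static virtual" (abstract) member
}

struct Meters : IZero<Meters>
{
    public double Value;
    public static Meters Zero => default; // implementation provided by the value type
}

class Demo
{
    // T.Zero is a constrained call on a static virtual member, the kind of
    // dispatch the PR wires up in JitInterface/ResolveConstraintMethodApprox.
    static T ZeroOf<T>() where T : IZero<T> => T.Zero;

    static void Main() => System.Console.WriteLine(ZeroOf<Meters>().Value); // 0
}
```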
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.Security.AccessControl/tests/DiscretionaryAcl/DiscretionaryAcl_Purge.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Security.Principal; using Xunit; namespace System.Security.AccessControl.Tests { public class DiscretionaryAcl_Purge { private static bool TestPurge(DiscretionaryAcl discretionaryAcl, SecurityIdentifier sid, int aceCount) { KnownAce ace = null; discretionaryAcl.Purge(sid); if (aceCount != discretionaryAcl.Count) return false; for (int i = 0; i < discretionaryAcl.Count; i++) { ace = discretionaryAcl[i] as KnownAce; if (ace != null && ((ace.AceFlags & AceFlags.Inherited) == 0)) { if (ace.SecurityIdentifier == sid) return false; } } return true; } [Fact] public static void Purge_BasicValidationTestCases() { bool isContainer = false; bool isDS = false; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; int aceCount = 0; SecurityIdentifier sid = null; GenericAce gAce = null; byte revision = 0; int capacity = 0; //CustomAce constructor parameters AceType aceType = AceType.AccessAllowed; AceFlags aceFlag = AceFlags.None; byte[] opaque = null; //CompoundAce constructor additional parameters int accessMask = 0; CompoundAceType compoundAceType = CompoundAceType.Impersonation; string sidStr = "LA"; //CommonAce constructor additional parameters AceQualifier aceQualifier = 0; //ObjectAce constructor additional parameters ObjectAceFlags objectAceFlag = 0; Guid objectAceType; Guid inheritedObjectAceType; //case 1, no Ace revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 2, only have 1 explicit Ace of the sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //199 has all aceflags but inheritonly and inherited gAce = new CommonAce((AceFlags)199, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 3, only have 1 explicit Ace of different sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); //199 has all aceflags but inheritedonly and inherited sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); gAce = new CommonAce((AceFlags)199, AceQualifier.AccessDenied, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; sidStr = "BA"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 4, only have 1 inherited Ace of the sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //215 has all aceflags but inheritedonly gAce = new CommonAce((AceFlags)215, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; 
discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 5, have one explicit Ace and one inherited Ace of the sid revision = 255; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //199 has all aceflags but inheritedonly and inherited gAce = new CommonAce((AceFlags)(FlagsForAce.AuditFlags | FlagsForAce.OI | FlagsForAce.CI | FlagsForAce.NP), AceQualifier.AccessDenied, 1, sid, false, null); rawAcl.InsertAce(0, gAce); //215 has all aceflags but inheritedonly gAce = new CommonAce((AceFlags)(FlagsForAce.AuditFlags | FlagsForAce.OI | FlagsForAce.CI | FlagsForAce.NP | FlagsForAce.IH), AceQualifier.AccessAllowed, 2, sid, false, null); rawAcl.InsertAce(1, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 6, have two explicit Aces of the sid revision = 255; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //207 has all AceFlags but inherited gAce = new CommonAce((AceFlags)207, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); gAce = new CommonAce(AceFlags.None, AceQualifier.AccessDenied, 2, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, 0)); //case 7, 1 explicit CustomAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); aceType = AceType.MaxDefinedAceType + 1; //199 has all AceFlags except InheritOnly and Inherited aceFlag = (AceFlags)199; opaque = null; gAce = new CustomAce(aceType, aceFlag, opaque); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); aceCount = 1; //After Mark changes design to make ACL with any CustomAce, CompoundAce uncanonical and //forbid the modification on uncanonical ACL, this case will throw InvalidOperationException Assert.Throws<InvalidOperationException>(() => { TestPurge(discretionaryAcl, sid, aceCount); }); //case 8, 1 explicit CompoundAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); //207 has all AceFlags but inherited aceFlag = (AceFlags)207; accessMask = 1; compoundAceType = CompoundAceType.Impersonation; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); gAce = new CompoundAce(aceFlag, accessMask, compoundAceType, sid); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; //After Mark changes design to make ACL with any CustomAce, CompoundAce uncanonical and //forbid the modification on uncanonical ACL, this case will throw InvalidOperationException Assert.Throws<InvalidOperationException>(() => { TestPurge(discretionaryAcl, sid, aceCount); }); //case 9, 1 explicit ObjectAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); //207 has all AceFlags but inherited aceFlag = 
(AceFlags)207; aceQualifier = AceQualifier.AccessAllowed; accessMask = 1; objectAceFlag = ObjectAceFlags.ObjectAceTypePresent | ObjectAceFlags.InheritedObjectAceTypePresent; objectAceType = new Guid("11111111-1111-1111-1111-111111111111"); inheritedObjectAceType = new Guid("22222222-2222-2222-2222-222222222222"); gAce = new ObjectAce(aceFlag, aceQualifier, accessMask, sid, objectAceFlag, objectAceType, inheritedObjectAceType, false, null); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = true; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); } [Fact] public static void Purge_AdditionalTestCases() { bool isContainer = false; bool isDS = false; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; byte revision = 0; int capacity = 0; //case 1, null Sid Assert.Throws<ArgumentNullException>(() => { revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); discretionaryAcl.Purge(null); }); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Security.Principal; using Xunit; namespace System.Security.AccessControl.Tests { public class DiscretionaryAcl_Purge { private static bool TestPurge(DiscretionaryAcl discretionaryAcl, SecurityIdentifier sid, int aceCount) { KnownAce ace = null; discretionaryAcl.Purge(sid); if (aceCount != discretionaryAcl.Count) return false; for (int i = 0; i < discretionaryAcl.Count; i++) { ace = discretionaryAcl[i] as KnownAce; if (ace != null && ((ace.AceFlags & AceFlags.Inherited) == 0)) { if (ace.SecurityIdentifier == sid) return false; } } return true; } [Fact] public static void Purge_BasicValidationTestCases() { bool isContainer = false; bool isDS = false; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; int aceCount = 0; SecurityIdentifier sid = null; GenericAce gAce = null; byte revision = 0; int capacity = 0; //CustomAce constructor parameters AceType aceType = AceType.AccessAllowed; AceFlags aceFlag = AceFlags.None; byte[] opaque = null; //CompoundAce constructor additional parameters int accessMask = 0; CompoundAceType compoundAceType = CompoundAceType.Impersonation; string sidStr = "LA"; //CommonAce constructor additional parameters AceQualifier aceQualifier = 0; //ObjectAce constructor additional parameters ObjectAceFlags objectAceFlag = 0; Guid objectAceType; Guid inheritedObjectAceType; //case 1, no Ace revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 2, only have 1 explicit Ace of the sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //199 has all aceflags but inheritonly and inherited gAce = new CommonAce((AceFlags)199, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 3, only have 1 explicit Ace of different sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); //199 has all aceflags but inheritedonly and inherited sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); gAce = new CommonAce((AceFlags)199, AceQualifier.AccessDenied, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; sidStr = "BA"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 4, only have 1 inherited Ace of the sid revision = 0; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //215 has all aceflags but inheritedonly gAce = new CommonAce((AceFlags)215, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; 
discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 5, have one explicit Ace and one inherited Ace of the sid revision = 255; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //199 has all aceflags but inheritedonly and inherited gAce = new CommonAce((AceFlags)(FlagsForAce.AuditFlags | FlagsForAce.OI | FlagsForAce.CI | FlagsForAce.NP), AceQualifier.AccessDenied, 1, sid, false, null); rawAcl.InsertAce(0, gAce); //215 has all aceflags but inheritedonly gAce = new CommonAce((AceFlags)(FlagsForAce.AuditFlags | FlagsForAce.OI | FlagsForAce.CI | FlagsForAce.NP | FlagsForAce.IH), AceQualifier.AccessAllowed, 2, sid, false, null); rawAcl.InsertAce(1, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 1; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); //case 6, have two explicit Aces of the sid revision = 255; capacity = 1; rawAcl = new RawAcl(revision, capacity); sidStr = "BG"; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid(sidStr)); //207 has all AceFlags but inherited gAce = new CommonAce((AceFlags)207, AceQualifier.AccessAllowed, 1, sid, false, null); rawAcl.InsertAce(0, gAce); gAce = new CommonAce(AceFlags.None, AceQualifier.AccessDenied, 2, sid, false, null); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, 0)); //case 7, 1 explicit CustomAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); aceType = AceType.MaxDefinedAceType + 1; //199 has all AceFlags except InheritOnly and Inherited aceFlag = (AceFlags)199; opaque = null; gAce = new CustomAce(aceType, aceFlag, opaque); rawAcl.InsertAce(0, gAce); isContainer = false; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); aceCount = 1; //After Mark changes design to make ACL with any CustomAce, CompoundAce uncanonical and //forbid the modification on uncanonical ACL, this case will throw InvalidOperationException Assert.Throws<InvalidOperationException>(() => { TestPurge(discretionaryAcl, sid, aceCount); }); //case 8, 1 explicit CompoundAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); //207 has all AceFlags but inherited aceFlag = (AceFlags)207; accessMask = 1; compoundAceType = CompoundAceType.Impersonation; sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); gAce = new CompoundAce(aceFlag, accessMask, compoundAceType, sid); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; //After Mark changes design to make ACL with any CustomAce, CompoundAce uncanonical and //forbid the modification on uncanonical ACL, this case will throw InvalidOperationException Assert.Throws<InvalidOperationException>(() => { TestPurge(discretionaryAcl, sid, aceCount); }); //case 9, 1 explicit ObjectAce revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); sid = new SecurityIdentifier(Utils.TranslateStringConstFormatSidToStandardFormatSid("BG")); //207 has all AceFlags but inherited aceFlag = 
(AceFlags)207; aceQualifier = AceQualifier.AccessAllowed; accessMask = 1; objectAceFlag = ObjectAceFlags.ObjectAceTypePresent | ObjectAceFlags.InheritedObjectAceTypePresent; objectAceType = new Guid("11111111-1111-1111-1111-111111111111"); inheritedObjectAceType = new Guid("22222222-2222-2222-2222-222222222222"); gAce = new ObjectAce(aceFlag, aceQualifier, accessMask, sid, objectAceFlag, objectAceType, inheritedObjectAceType, false, null); rawAcl.InsertAce(0, gAce); isContainer = true; isDS = true; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); aceCount = 0; Assert.True(TestPurge(discretionaryAcl, sid, aceCount)); } [Fact] public static void Purge_AdditionalTestCases() { bool isContainer = false; bool isDS = false; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; byte revision = 0; int capacity = 0; //case 1, null Sid Assert.Throws<ArgumentNullException>(() => { revision = 127; capacity = 1; rawAcl = new RawAcl(revision, capacity); isContainer = true; isDS = false; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, rawAcl); discretionaryAcl.Purge(null); }); } } }
-1
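The test record above exercises `DiscretionaryAcl.Purge`, which removes explicit (non-inherited) ACEs for a given SID and throws `InvalidOperationException` when the ACL is non-canonical because it contains custom or compound ACEs. A minimal usage sketch with the real `System.Security.AccessControl` types, mirroring the test's "one explicit allow ACE" case (the access mask value 1 is arbitrary):

```csharp
using System;
using System.Security.AccessControl;
using System.Security.Principal;

class PurgeDemo
{
    static void Main()
    {
        var sid = new SecurityIdentifier(WellKnownSidType.BuiltinGuestsSid, null);

        // RawAcl(revision, capacity); one explicit AccessAllowed ACE for sid.
        var raw = new RawAcl(GenericAcl.AclRevision, 1);
        raw.InsertAce(0, new CommonAce(AceFlags.None, AceQualifier.AccessAllowed,
                                       1, sid, false, null));

        // DiscretionaryAcl(isContainer, isDS, rawAcl)
        var dacl = new DiscretionaryAcl(false, false, raw);

        dacl.Purge(sid);               // drops the explicit ACE for this SID
        Console.WriteLine(dacl.Count); // 0, matching the test's aceCount expectation
    }
}
```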
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/Common/src/System/Runtime/InteropServices/ComEventsMethod.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Reflection; namespace System.Runtime.InteropServices { /// <summary> /// Part of ComEventHelpers APIs which allow binding /// managed delegates to COM's connection point based events. /// </summary> internal sealed class ComEventsMethod { /// <summary> /// This delegate wrapper class handles dynamic invocation of delegates. The reason for the wrapper's /// existence is that under certain circumstances we need to coerce arguments to types expected by the /// delegates signature. Normally, reflection (Delegate.DynamicInvoke) handles type coercion /// correctly but one known case is when the expected signature is 'ref Enum' - in this case /// reflection by design does not do the coercion. Since we need to be compatible with COM interop /// handling of this scenario - we are pre-processing delegate's signature by looking for 'ref enums' /// and cache the types required for such coercion. /// </summary> public sealed class DelegateWrapper { private bool _once; private int _expectedParamsCount; private Type?[]? _cachedTargetTypes; public DelegateWrapper(Delegate d, bool wrapArgs) { Delegate = d; WrapArgs = wrapArgs; } public Delegate Delegate { get; set; } public bool WrapArgs { get; } public object? Invoke(object[] args) { if (Delegate == null) { return null; } if (!_once) { PreProcessSignature(); _once = true; } if (_cachedTargetTypes != null && _expectedParamsCount == args.Length) { for (int i = 0; i < _expectedParamsCount; i++) { if (_cachedTargetTypes[i] is Type t) { args[i] = Enum.ToObject(t, args[i]); } } } return Delegate.DynamicInvoke(WrapArgs ? new object[] { args } : args); } private void PreProcessSignature() { ParameterInfo[] parameters = Delegate.Method.GetParameters(); _expectedParamsCount = parameters.Length; Type?[]? targetTypes = null; for (int i = 0; i < _expectedParamsCount; i++) { ParameterInfo pi = parameters[i]; // recognize only 'ref Enum' signatures and cache // both enum type and the underlying type. if (pi.ParameterType.IsByRef && pi.ParameterType.HasElementType && pi.ParameterType.GetElementType()!.IsEnum) { if (targetTypes == null) { targetTypes = new Type?[_expectedParamsCount]; } targetTypes[i] = pi.ParameterType.GetElementType(); } } if (targetTypes != null) { _cachedTargetTypes = targetTypes; } } } /// <summary> /// Invoking ComEventsMethod means invoking a multi-cast delegate attached to it. /// Since multicast delegate's built-in chaining supports only chaining instances of the same type, /// we need to complement this design by using an explicit linked list data structure. /// </summary> private readonly List<DelegateWrapper> _delegateWrappers = new List<DelegateWrapper>(); private readonly int _dispid; private ComEventsMethod? _next; public ComEventsMethod(int dispid) { _dispid = dispid; } public static ComEventsMethod? Find(ComEventsMethod? methods, int dispid) { while (methods != null && methods._dispid != dispid) { methods = methods._next; } return methods; } public static ComEventsMethod Add(ComEventsMethod? methods, ComEventsMethod method) { method._next = methods; return method; } public static ComEventsMethod? 
Remove(ComEventsMethod methods, ComEventsMethod method) { Debug.Assert(methods != null, "removing method from empty methods collection"); Debug.Assert(method != null, "specify method is null"); if (methods == method) { return methods._next; } else { ComEventsMethod? current = methods; while (current != null && current._next != method) { current = current._next; } if (current != null) { current._next = method._next; } return methods; } } public bool Empty { get { lock (_delegateWrappers) { return _delegateWrappers.Count == 0; } } } public void AddDelegate(Delegate d, bool wrapArgs = false) { lock (_delegateWrappers) { // Update an existing delegate wrapper foreach (DelegateWrapper wrapper in _delegateWrappers) { if (wrapper.Delegate.GetType() == d.GetType() && wrapper.WrapArgs == wrapArgs) { wrapper.Delegate = Delegate.Combine(wrapper.Delegate, d); return; } } var newWrapper = new DelegateWrapper(d, wrapArgs); _delegateWrappers.Add(newWrapper); } } public void RemoveDelegate(Delegate d, bool wrapArgs = false) { lock (_delegateWrappers) { // Find delegate wrapper index int removeIdx = -1; DelegateWrapper? wrapper = null; for (int i = 0; i < _delegateWrappers.Count; i++) { DelegateWrapper wrapperMaybe = _delegateWrappers[i]; if (wrapperMaybe.Delegate.GetType() == d.GetType() && wrapperMaybe.WrapArgs == wrapArgs) { removeIdx = i; wrapper = wrapperMaybe; break; } } if (removeIdx < 0) { // Not present in collection return; } // Update wrapper or remove from collection Delegate? newDelegate = Delegate.Remove(wrapper!.Delegate, d); if (newDelegate != null) { wrapper.Delegate = newDelegate; } else { _delegateWrappers.RemoveAt(removeIdx); } } } public void RemoveDelegates(Func<Delegate, bool> condition) { lock (_delegateWrappers) { // Find delegate wrapper indexes. Iterate in reverse such that the list to remove is sorted by high to low index. List<int> toRemove = new List<int>(); for (int i = _delegateWrappers.Count - 1; i >= 0; i--) { DelegateWrapper wrapper = _delegateWrappers[i]; Delegate[] invocationList = wrapper.Delegate.GetInvocationList(); foreach (Delegate delegateMaybe in invocationList) { if (condition(delegateMaybe)) { Delegate? newDelegate = Delegate.Remove(wrapper!.Delegate, delegateMaybe); if (newDelegate != null) { wrapper.Delegate = newDelegate; } else { toRemove.Add(i); } } } } foreach (int idx in toRemove) { _delegateWrappers.RemoveAt(idx); } } } public object? Invoke(object[] args) { Debug.Assert(!Empty); object? result = null; lock (_delegateWrappers) { foreach (DelegateWrapper wrapper in _delegateWrappers) { result = wrapper.Invoke(args); } } return result; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Reflection; namespace System.Runtime.InteropServices { /// <summary> /// Part of ComEventHelpers APIs which allow binding /// managed delegates to COM's connection point based events. /// </summary> internal sealed class ComEventsMethod { /// <summary> /// This delegate wrapper class handles dynamic invocation of delegates. The reason for the wrapper's /// existence is that under certain circumstances we need to coerce arguments to types expected by the /// delegates signature. Normally, reflection (Delegate.DynamicInvoke) handles type coercion /// correctly but one known case is when the expected signature is 'ref Enum' - in this case /// reflection by design does not do the coercion. Since we need to be compatible with COM interop /// handling of this scenario - we are pre-processing delegate's signature by looking for 'ref enums' /// and cache the types required for such coercion. /// </summary> public sealed class DelegateWrapper { private bool _once; private int _expectedParamsCount; private Type?[]? _cachedTargetTypes; public DelegateWrapper(Delegate d, bool wrapArgs) { Delegate = d; WrapArgs = wrapArgs; } public Delegate Delegate { get; set; } public bool WrapArgs { get; } public object? Invoke(object[] args) { if (Delegate == null) { return null; } if (!_once) { PreProcessSignature(); _once = true; } if (_cachedTargetTypes != null && _expectedParamsCount == args.Length) { for (int i = 0; i < _expectedParamsCount; i++) { if (_cachedTargetTypes[i] is Type t) { args[i] = Enum.ToObject(t, args[i]); } } } return Delegate.DynamicInvoke(WrapArgs ? new object[] { args } : args); } private void PreProcessSignature() { ParameterInfo[] parameters = Delegate.Method.GetParameters(); _expectedParamsCount = parameters.Length; Type?[]? targetTypes = null; for (int i = 0; i < _expectedParamsCount; i++) { ParameterInfo pi = parameters[i]; // recognize only 'ref Enum' signatures and cache // both enum type and the underlying type. if (pi.ParameterType.IsByRef && pi.ParameterType.HasElementType && pi.ParameterType.GetElementType()!.IsEnum) { if (targetTypes == null) { targetTypes = new Type?[_expectedParamsCount]; } targetTypes[i] = pi.ParameterType.GetElementType(); } } if (targetTypes != null) { _cachedTargetTypes = targetTypes; } } } /// <summary> /// Invoking ComEventsMethod means invoking a multi-cast delegate attached to it. /// Since multicast delegate's built-in chaining supports only chaining instances of the same type, /// we need to complement this design by using an explicit linked list data structure. /// </summary> private readonly List<DelegateWrapper> _delegateWrappers = new List<DelegateWrapper>(); private readonly int _dispid; private ComEventsMethod? _next; public ComEventsMethod(int dispid) { _dispid = dispid; } public static ComEventsMethod? Find(ComEventsMethod? methods, int dispid) { while (methods != null && methods._dispid != dispid) { methods = methods._next; } return methods; } public static ComEventsMethod Add(ComEventsMethod? methods, ComEventsMethod method) { method._next = methods; return method; } public static ComEventsMethod? 
Remove(ComEventsMethod methods, ComEventsMethod method) { Debug.Assert(methods != null, "removing method from empty methods collection"); Debug.Assert(method != null, "specify method is null"); if (methods == method) { return methods._next; } else { ComEventsMethod? current = methods; while (current != null && current._next != method) { current = current._next; } if (current != null) { current._next = method._next; } return methods; } } public bool Empty { get { lock (_delegateWrappers) { return _delegateWrappers.Count == 0; } } } public void AddDelegate(Delegate d, bool wrapArgs = false) { lock (_delegateWrappers) { // Update an existing delegate wrapper foreach (DelegateWrapper wrapper in _delegateWrappers) { if (wrapper.Delegate.GetType() == d.GetType() && wrapper.WrapArgs == wrapArgs) { wrapper.Delegate = Delegate.Combine(wrapper.Delegate, d); return; } } var newWrapper = new DelegateWrapper(d, wrapArgs); _delegateWrappers.Add(newWrapper); } } public void RemoveDelegate(Delegate d, bool wrapArgs = false) { lock (_delegateWrappers) { // Find delegate wrapper index int removeIdx = -1; DelegateWrapper? wrapper = null; for (int i = 0; i < _delegateWrappers.Count; i++) { DelegateWrapper wrapperMaybe = _delegateWrappers[i]; if (wrapperMaybe.Delegate.GetType() == d.GetType() && wrapperMaybe.WrapArgs == wrapArgs) { removeIdx = i; wrapper = wrapperMaybe; break; } } if (removeIdx < 0) { // Not present in collection return; } // Update wrapper or remove from collection Delegate? newDelegate = Delegate.Remove(wrapper!.Delegate, d); if (newDelegate != null) { wrapper.Delegate = newDelegate; } else { _delegateWrappers.RemoveAt(removeIdx); } } } public void RemoveDelegates(Func<Delegate, bool> condition) { lock (_delegateWrappers) { // Find delegate wrapper indexes. Iterate in reverse such that the list to remove is sorted by high to low index. List<int> toRemove = new List<int>(); for (int i = _delegateWrappers.Count - 1; i >= 0; i--) { DelegateWrapper wrapper = _delegateWrappers[i]; Delegate[] invocationList = wrapper.Delegate.GetInvocationList(); foreach (Delegate delegateMaybe in invocationList) { if (condition(delegateMaybe)) { Delegate? newDelegate = Delegate.Remove(wrapper!.Delegate, delegateMaybe); if (newDelegate != null) { wrapper.Delegate = newDelegate; } else { toRemove.Add(i); } } } } foreach (int idx in toRemove) { _delegateWrappers.RemoveAt(idx); } } } public object? Invoke(object[] args) { Debug.Assert(!Empty); object? result = null; lock (_delegateWrappers) { foreach (DelegateWrapper wrapper in _delegateWrappers) { result = wrapper.Invoke(args); } } return result; } } }
-1
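The `ComEventsMethod.DelegateWrapper` record above exists because, as its own comment notes, `Delegate.DynamicInvoke` does not coerce a boxed underlying value back to the enum type for `ref Enum` parameters. A minimal standalone sketch of the `Enum.ToObject` coercion the wrapper caches and applies; the `Color` and `Handler` names are made up for illustration:

```csharp
using System;

enum Color { None = 0, Red = 1 }
delegate void Handler(ref Color c);

class CoercionDemo
{
    static void Main()
    {
        Handler h = (ref Color c) => Console.WriteLine(c);
        object[] args = { 1 };  // boxed Int32, as a COM event source would supply

        // The step DelegateWrapper performs before invoking: box the value
        // as the enum type so the 'ref Color' slot can bind to it.
        args[0] = Enum.ToObject(typeof(Color), args[0]);

        h.DynamicInvoke(args);  // prints "Red"
    }
}
```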
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/General/NotSupported/Vector128BooleanZero.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void Vector128BooleanZero() { bool succeeded = false; try { Vector128<bool> result = Vector128<bool>.Zero; } catch (NotSupportedException) { succeeded = true; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128BooleanZero: RunNotSupportedScenario failed to throw NotSupportedException."); TestLibrary.TestFramework.LogInformation(string.Empty); throw new Exception("One or more scenarios did not complete as expected."); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void Vector128BooleanZero() { bool succeeded = false; try { Vector128<bool> result = Vector128<bool>.Zero; } catch (NotSupportedException) { succeeded = true; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128BooleanZero: RunNotSupportedScenario failed to throw NotSupportedException."); TestLibrary.TestFramework.LogInformation(string.Empty); throw new Exception("One or more scenarios did not complete as expected."); } } } }
-1
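The record above documents that `bool` is not a supported `Vector128<T>` element type: the generic type constructs (since `bool` satisfies the `struct` constraint), but touching any member throws `NotSupportedException` at runtime. The same probe works outside the test harness:

```csharp
using System;
using System.Runtime.Intrinsics;

class VectorBoolDemo
{
    static void Main()
    {
        try { _ = Vector128<bool>.Zero; }
        catch (NotSupportedException) { Console.WriteLine("bool is not a supported element type"); }

        Console.WriteLine(Vector128<int>.Zero); // numeric element types work: <0, 0, 0, 0>
    }
}
```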
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/GC/API/GC/KeepAliveNull.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// Tests KeepAlive()

using System;
using System.Runtime.CompilerServices;

public class Test_KeepAliveNull
{
    public static bool visited;

    public class Dummy
    {
        ~Dummy()
        {
            Console.WriteLine("In Finalize() of Dummy");
            visited = true;
        }
    }

    public class CreateObj
    {
        public Dummy obj;

        public CreateObj()
        {
            obj = new Dummy();
        }

        [MethodImplAttribute(MethodImplOptions.NoInlining)]
        public void DestroyObj()
        {
            obj = null; // clearing the only reference lets the GC collect the Dummy
                        // even though GC.KeepAlive(obj) appears later: it reads null by then
        }

        public void RunTest()
        {
            DestroyObj();
            GC.Collect();
            GC.WaitForPendingFinalizers();
            GC.Collect();
            GC.KeepAlive(obj); // would keep 'obj' alive up to this point, were it still set
        }
    }

    public static int Main()
    {
        CreateObj temp = new CreateObj();
        temp.RunTest();

        if (visited)
        {
            Console.WriteLine("Test for KeepAlive() passed!");
            return 100;
        }
        else
        {
            Console.WriteLine("Test for KeepAlive() failed!");
            return 1;
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// Tests KeepAlive()

using System;
using System.Runtime.CompilerServices;

public class Test_KeepAliveNull
{
    public static bool visited;

    public class Dummy
    {
        ~Dummy()
        {
            Console.WriteLine("In Finalize() of Dummy");
            visited = true;
        }
    }

    public class CreateObj
    {
        public Dummy obj;

        public CreateObj()
        {
            obj = new Dummy();
        }

        [MethodImplAttribute(MethodImplOptions.NoInlining)]
        public void DestroyObj()
        {
            obj = null; // clearing the only reference lets the GC collect the Dummy
                        // even though GC.KeepAlive(obj) appears later: it reads null by then
        }

        public void RunTest()
        {
            DestroyObj();
            GC.Collect();
            GC.WaitForPendingFinalizers();
            GC.Collect();
            GC.KeepAlive(obj); // would keep 'obj' alive up to this point, were it still set
        }
    }

    public static int Main()
    {
        CreateObj temp = new CreateObj();
        temp.RunTest();

        if (visited)
        {
            Console.WriteLine("Test for KeepAlive() passed!");
            return 100;
        }
        else
        {
            Console.WriteLine("Test for KeepAlive() failed!");
            return 1;
        }
    }
}
-1
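The PR description in the record above is about static virtual (static abstract) interface members and the constrained calls that invoke them. A minimal C# 11 sketch of the language feature being wired up, written for illustration and not taken from the PR itself:

using System;

interface IAdditive<TSelf> where TSelf : IAdditive<TSelf>
{
    static abstract TSelf Zero { get; }           // static virtual (abstract) member
    static abstract TSelf Add(TSelf a, TSelf b);
}

struct Meters : IAdditive<Meters>
{
    public double Value;
    public Meters(double v) => Value = v;
    public static Meters Zero => new Meters(0);
    public static Meters Add(Meters a, Meters b) => new Meters(a.Value + b.Value);
}

static class StaticVirtualsDemo
{
    // T.Zero and T.Add compile to constrained calls on the type parameter,
    // which is the call shape ResolveConstraintMethodApprox has to resolve.
    static T Sum<T>(T[] items) where T : IAdditive<T>
    {
        T acc = T.Zero;
        foreach (T item in items) acc = T.Add(acc, item);
        return acc;
    }

    static void Main() =>
        Console.WriteLine(Sum(new[] { new Meters(1), new Meters(2) }).Value); // 3
}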
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/X86/Bmi1.X64/GetMaskUpToLowestSetBit.UInt64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void GetMaskUpToLowestSetBitUInt64() { var test = new ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64 { private struct TestStruct { public UInt64 _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = TestLibrary.Generator.GetUInt64(); return testStruct; } public void RunStructFldScenario(ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64 testClass) { var result = Bmi1.X64.GetMaskUpToLowestSetBit(_fld); testClass.ValidateResult(_fld, result); } } private static UInt64 _data; private static UInt64 _clsVar; private UInt64 _fld; static ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64() { _clsVar = TestLibrary.Generator.GetUInt64(); } public ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64() { Succeeded = true; _fld = TestLibrary.Generator.GetUInt64(); _data = TestLibrary.Generator.GetUInt64(); } public bool IsSupported => Bmi1.X64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Bmi1.X64.GetMaskUpToLowestSetBit( Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) ); ValidateResult(_data, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Bmi1.X64).GetMethod(nameof(Bmi1.X64.GetMaskUpToLowestSetBit), new Type[] { typeof(UInt64) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) }); ValidateResult(_data, (UInt64)result); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
Bmi1.X64.GetMaskUpToLowestSetBit( _clsVar ); ValidateResult(_clsVar, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)); var result = Bmi1.X64.GetMaskUpToLowestSetBit(data); ValidateResult(data, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64(); var result = Bmi1.X64.GetMaskUpToLowestSetBit(test._fld); ValidateResult(test._fld, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Bmi1.X64.GetMaskUpToLowestSetBit(_fld); ValidateResult(_fld, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Bmi1.X64.GetMaskUpToLowestSetBit(test._fld); ValidateResult(test._fld, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(UInt64 data, UInt64 result, [CallerMemberName] string method = "") { var isUnexpectedResult = false; isUnexpectedResult = (((data - 1) ^ data) != result); if (isUnexpectedResult) { TestLibrary.TestFramework.LogInformation($"{nameof(Bmi1.X64)}.{nameof(Bmi1.X64.GetMaskUpToLowestSetBit)}<UInt64>(UInt64): GetMaskUpToLowestSetBit failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void GetMaskUpToLowestSetBitUInt64() { var test = new ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64 { private struct TestStruct { public UInt64 _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = TestLibrary.Generator.GetUInt64(); return testStruct; } public void RunStructFldScenario(ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64 testClass) { var result = Bmi1.X64.GetMaskUpToLowestSetBit(_fld); testClass.ValidateResult(_fld, result); } } private static UInt64 _data; private static UInt64 _clsVar; private UInt64 _fld; static ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64() { _clsVar = TestLibrary.Generator.GetUInt64(); } public ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64() { Succeeded = true; _fld = TestLibrary.Generator.GetUInt64(); _data = TestLibrary.Generator.GetUInt64(); } public bool IsSupported => Bmi1.X64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Bmi1.X64.GetMaskUpToLowestSetBit( Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) ); ValidateResult(_data, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Bmi1.X64).GetMethod(nameof(Bmi1.X64.GetMaskUpToLowestSetBit), new Type[] { typeof(UInt64) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) }); ValidateResult(_data, (UInt64)result); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
Bmi1.X64.GetMaskUpToLowestSetBit( _clsVar ); ValidateResult(_clsVar, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)); var result = Bmi1.X64.GetMaskUpToLowestSetBit(data); ValidateResult(data, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ScalarUnaryOpTest__GetMaskUpToLowestSetBitUInt64(); var result = Bmi1.X64.GetMaskUpToLowestSetBit(test._fld); ValidateResult(test._fld, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Bmi1.X64.GetMaskUpToLowestSetBit(_fld); ValidateResult(_fld, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Bmi1.X64.GetMaskUpToLowestSetBit(test._fld); ValidateResult(test._fld, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(UInt64 data, UInt64 result, [CallerMemberName] string method = "") { var isUnexpectedResult = false; isUnexpectedResult = (((data - 1) ^ data) != result); if (isUnexpectedResult) { TestLibrary.TestFramework.LogInformation($"{nameof(Bmi1.X64)}.{nameof(Bmi1.X64.GetMaskUpToLowestSetBit)}<UInt64>(UInt64): GetMaskUpToLowestSetBit failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
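The ValidateResult logic in the record above encodes the BLSMSK semantics directly: the expected value is ((data - 1) ^ data), that is, all bits up to and including the lowest set bit of the input. A tiny hedged sketch of that scalar model, not from the test suite, only to make the check concrete:

using System;

static class BlsmskDemo
{
    // Software model of Bmi1.X64.GetMaskUpToLowestSetBit for a ulong input.
    static ulong GetMaskUpToLowestSetBit(ulong value) => (value - 1) ^ value;

    static void Main()
    {
        // 0b1011000: lowest set bit is bit 3, so the mask is 0b0001111.
        Console.WriteLine(Convert.ToString((long)GetMaskUpToLowestSetBit(0b1011000), 2)); // 1111
    }
}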
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/CompareEqualScalar.Vector64.Double.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void CompareEqualScalar_Vector64_Double() { var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] inArray2, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || 
(alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Double, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Double> _fld1; public Vector64<Double> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref testStruct._fld2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double testClass) { var result = AdvSimd.Arm64.CompareEqualScalar(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double testClass) { fixed (Vector64<Double>* pFld1 = &_fld1) fixed (Vector64<Double>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Double[] _data2 = new Double[Op2ElementCount]; private static Vector64<Double> _clsVar1; private static Vector64<Double> _clsVar2; private Vector64<Double> _fld1; private Vector64<Double> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _clsVar2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); } public SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _fld2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, _data2, new Double[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.CompareEqualScalar( Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.CompareEqualScalar), new Type[] { typeof(Vector64<Double>), typeof(Vector64<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.CompareEqualScalar), new Type[] { typeof(Vector64<Double>), typeof(Vector64<Double>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
AdvSimd.Arm64.CompareEqualScalar( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Double>* pClsVar1 = &_clsVar1) fixed (Vector64<Double>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pClsVar1)), AdvSimd.LoadVector64((Double*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.CompareEqualScalar(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.CompareEqualScalar(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); var result = AdvSimd.Arm64.CompareEqualScalar(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); fixed (Vector64<Double>* pFld1 = &test._fld1) fixed (Vector64<Double>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.CompareEqualScalar(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Double>* pFld1 = &_fld1) fixed (Vector64<Double>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.CompareEqualScalar(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(&test._fld1)), AdvSimd.LoadVector64((Double*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Double> op1, Vector64<Double> op2, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] inArray2 = new Double[Op2ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Double>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] inArray2 = new Double[Op2ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Double>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Double[] left, Double[] right, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (BitConverter.DoubleToInt64Bits(Helpers.CompareEqual(left[0], right[0])) != BitConverter.DoubleToInt64Bits(result[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (BitConverter.DoubleToInt64Bits(result[i]) != 0) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.CompareEqualScalar)}<Double>(Vector64<Double>, Vector64<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void CompareEqualScalar_Vector64_Double() { var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] inArray2, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || 
(alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Double, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Double> _fld1; public Vector64<Double> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref testStruct._fld2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double testClass) { var result = AdvSimd.Arm64.CompareEqualScalar(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double testClass) { fixed (Vector64<Double>* pFld1 = &_fld1) fixed (Vector64<Double>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Double[] _data2 = new Double[Op2ElementCount]; private static Vector64<Double> _clsVar1; private static Vector64<Double> _clsVar2; private Vector64<Double> _fld1; private Vector64<Double> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _clsVar2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); } public SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Double>, byte>(ref _fld2), ref Unsafe.As<Double, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, _data2, new Double[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.CompareEqualScalar( Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.CompareEqualScalar), new Type[] { typeof(Vector64<Double>), typeof(Vector64<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.CompareEqualScalar), new Type[] { typeof(Vector64<Double>), typeof(Vector64<Double>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
AdvSimd.Arm64.CompareEqualScalar( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Double>* pClsVar1 = &_clsVar1) fixed (Vector64<Double>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pClsVar1)), AdvSimd.LoadVector64((Double*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Double>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Double>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.CompareEqualScalar(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Double*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Double*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.CompareEqualScalar(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); var result = AdvSimd.Arm64.CompareEqualScalar(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__CompareEqualScalar_Vector64_Double(); fixed (Vector64<Double>* pFld1 = &test._fld1) fixed (Vector64<Double>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.CompareEqualScalar(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Double>* pFld1 = &_fld1) fixed (Vector64<Double>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(pFld1)), AdvSimd.LoadVector64((Double*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.CompareEqualScalar(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.Arm64.CompareEqualScalar( AdvSimd.LoadVector64((Double*)(&test._fld1)), AdvSimd.LoadVector64((Double*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Double> op1, Vector64<Double> op2, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] inArray2 = new Double[Op2ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Double>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] inArray2 = new Double[Op2ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Double>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Double[] left, Double[] right, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (BitConverter.DoubleToInt64Bits(Helpers.CompareEqual(left[0], right[0])) != BitConverter.DoubleToInt64Bits(result[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (BitConverter.DoubleToInt64Bits(result[i]) != 0) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.CompareEqualScalar)}<Double>(Vector64<Double>, Vector64<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
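The record above validates CompareEqualScalar by comparing raw bit patterns: lane 0 must hold an all-ones bit pattern (reinterpreted as a double) when the inputs compare equal, and the remaining lanes must be zero. A hedged scalar model of what the test's Helpers.CompareEqual is assumed to compute, mirroring the BitConverter-based checks in ValidateResult:

using System;

static class CompareEqualDemo
{
    // All-ones bits when equal, all-zero bits otherwise, viewed as a double.
    static double CompareEqual(double left, double right) =>
        BitConverter.Int64BitsToDouble(left == right ? -1L : 0L);

    static void Main()
    {
        Console.WriteLine(BitConverter.DoubleToInt64Bits(CompareEqual(1.0, 1.0))); // -1 (all ones)
        Console.WriteLine(BitConverter.DoubleToInt64Bits(CompareEqual(1.0, 2.0))); //  0
    }
}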
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/TypeSystem/GenericParameterConstraint.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Diagnostics;

namespace System.Reflection.Metadata
{
    public readonly struct GenericParameterConstraint
    {
        private readonly MetadataReader _reader;

        // Workaround: JIT doesn't generate good code for nested structures, so use RowId.
        private readonly int _rowId;

        internal GenericParameterConstraint(MetadataReader reader, GenericParameterConstraintHandle handle)
        {
            Debug.Assert(reader != null);
            Debug.Assert(!handle.IsNil);

            _reader = reader;
            _rowId = handle.RowId;
        }

        private GenericParameterConstraintHandle Handle
        {
            get { return GenericParameterConstraintHandle.FromRowId(_rowId); }
        }

        /// <summary>
        /// The constrained <see cref="GenericParameterHandle"/>.
        /// </summary>
        /// <remarks>
        /// Corresponds to Owner field of GenericParamConstraint table in ECMA-335 Standard.
        /// </remarks>
        public GenericParameterHandle Parameter
        {
            get { return _reader.GenericParamConstraintTable.GetOwner(Handle); }
        }

        /// <summary>
        /// Handle (<see cref="TypeDefinitionHandle"/>, <see cref="TypeReferenceHandle"/>, or <see cref="TypeSpecificationHandle"/>)
        /// specifying from which type this generic parameter is constrained to derive,
        /// or which interface this generic parameter is constrained to implement.
        /// </summary>
        /// <remarks>
        /// Corresponds to Constraint field of GenericParamConstraint table in ECMA-335 Standard.
        /// </remarks>
        public EntityHandle Type
        {
            get { return _reader.GenericParamConstraintTable.GetConstraint(Handle); }
        }

        public CustomAttributeHandleCollection GetCustomAttributes()
        {
            return new CustomAttributeHandleCollection(_reader, Handle);
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Diagnostics;

namespace System.Reflection.Metadata
{
    public readonly struct GenericParameterConstraint
    {
        private readonly MetadataReader _reader;

        // Workaround: JIT doesn't generate good code for nested structures, so use RowId.
        private readonly int _rowId;

        internal GenericParameterConstraint(MetadataReader reader, GenericParameterConstraintHandle handle)
        {
            Debug.Assert(reader != null);
            Debug.Assert(!handle.IsNil);

            _reader = reader;
            _rowId = handle.RowId;
        }

        private GenericParameterConstraintHandle Handle
        {
            get { return GenericParameterConstraintHandle.FromRowId(_rowId); }
        }

        /// <summary>
        /// The constrained <see cref="GenericParameterHandle"/>.
        /// </summary>
        /// <remarks>
        /// Corresponds to Owner field of GenericParamConstraint table in ECMA-335 Standard.
        /// </remarks>
        public GenericParameterHandle Parameter
        {
            get { return _reader.GenericParamConstraintTable.GetOwner(Handle); }
        }

        /// <summary>
        /// Handle (<see cref="TypeDefinitionHandle"/>, <see cref="TypeReferenceHandle"/>, or <see cref="TypeSpecificationHandle"/>)
        /// specifying from which type this generic parameter is constrained to derive,
        /// or which interface this generic parameter is constrained to implement.
        /// </summary>
        /// <remarks>
        /// Corresponds to Constraint field of GenericParamConstraint table in ECMA-335 Standard.
        /// </remarks>
        public EntityHandle Type
        {
            get { return _reader.GenericParamConstraintTable.GetConstraint(Handle); }
        }

        public CustomAttributeHandleCollection GetCustomAttributes()
        {
            return new CustomAttributeHandleCollection(_reader, Handle);
        }
    }
}
-1
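For context on how the GenericParameterConstraint struct in the record above is reached by consumers, here is a small System.Reflection.Metadata sketch that walks every generic parameter constraint in an assembly. The file name is a placeholder and the code is illustrative, not part of the library or the PR:

using System;
using System.IO;
using System.Reflection.Metadata;
using System.Reflection.PortableExecutable;

static class ConstraintDump
{
    static void Main()
    {
        using var pe = new PEReader(File.OpenRead("SomeAssembly.dll")); // hypothetical input
        MetadataReader reader = pe.GetMetadataReader();

        foreach (TypeDefinitionHandle tdh in reader.TypeDefinitions)
        {
            TypeDefinition typeDef = reader.GetTypeDefinition(tdh);
            foreach (GenericParameterHandle gph in typeDef.GetGenericParameters())
            {
                GenericParameter gp = reader.GetGenericParameter(gph);
                foreach (GenericParameterConstraintHandle ch in gp.GetConstraints())
                {
                    // Each constraint row exposes the constrained parameter and
                    // the type it must derive from or implement.
                    GenericParameterConstraint constraint = reader.GetGenericParameterConstraint(ch);
                    Console.WriteLine($"{reader.GetString(gp.Name)} : {constraint.Type.Kind}");
                }
            }
        }
    }
}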
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/coreclr/tools/Common/TypeSystem/Ecma/PrimitiveTypeProvider.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Reflection.Metadata;

namespace Internal.TypeSystem.Ecma
{
    public static class PrimitiveTypeProvider
    {
        public static TypeDesc GetPrimitiveType(TypeSystemContext context, PrimitiveTypeCode typeCode)
        {
            WellKnownType wkt;

            switch (typeCode)
            {
                case PrimitiveTypeCode.Boolean:
                    wkt = WellKnownType.Boolean;
                    break;
                case PrimitiveTypeCode.Byte:
                    wkt = WellKnownType.Byte;
                    break;
                case PrimitiveTypeCode.Char:
                    wkt = WellKnownType.Char;
                    break;
                case PrimitiveTypeCode.Double:
                    wkt = WellKnownType.Double;
                    break;
                case PrimitiveTypeCode.Int16:
                    wkt = WellKnownType.Int16;
                    break;
                case PrimitiveTypeCode.Int32:
                    wkt = WellKnownType.Int32;
                    break;
                case PrimitiveTypeCode.Int64:
                    wkt = WellKnownType.Int64;
                    break;
                case PrimitiveTypeCode.IntPtr:
                    wkt = WellKnownType.IntPtr;
                    break;
                case PrimitiveTypeCode.Object:
                    wkt = WellKnownType.Object;
                    break;
                case PrimitiveTypeCode.SByte:
                    wkt = WellKnownType.SByte;
                    break;
                case PrimitiveTypeCode.Single:
                    wkt = WellKnownType.Single;
                    break;
                case PrimitiveTypeCode.String:
                    wkt = WellKnownType.String;
                    break;
                case PrimitiveTypeCode.UInt16:
                    wkt = WellKnownType.UInt16;
                    break;
                case PrimitiveTypeCode.UInt32:
                    wkt = WellKnownType.UInt32;
                    break;
                case PrimitiveTypeCode.UInt64:
                    wkt = WellKnownType.UInt64;
                    break;
                case PrimitiveTypeCode.UIntPtr:
                    wkt = WellKnownType.UIntPtr;
                    break;
                case PrimitiveTypeCode.Void:
                    wkt = WellKnownType.Void;
                    break;
                case PrimitiveTypeCode.TypedReference:
                    wkt = WellKnownType.TypedReference;
                    break;
                default:
                    throw new BadImageFormatException();
            }

            return context.GetWellKnownType(wkt);
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Reflection.Metadata;

namespace Internal.TypeSystem.Ecma
{
    public static class PrimitiveTypeProvider
    {
        public static TypeDesc GetPrimitiveType(TypeSystemContext context, PrimitiveTypeCode typeCode)
        {
            WellKnownType wkt;

            switch (typeCode)
            {
                case PrimitiveTypeCode.Boolean:
                    wkt = WellKnownType.Boolean;
                    break;
                case PrimitiveTypeCode.Byte:
                    wkt = WellKnownType.Byte;
                    break;
                case PrimitiveTypeCode.Char:
                    wkt = WellKnownType.Char;
                    break;
                case PrimitiveTypeCode.Double:
                    wkt = WellKnownType.Double;
                    break;
                case PrimitiveTypeCode.Int16:
                    wkt = WellKnownType.Int16;
                    break;
                case PrimitiveTypeCode.Int32:
                    wkt = WellKnownType.Int32;
                    break;
                case PrimitiveTypeCode.Int64:
                    wkt = WellKnownType.Int64;
                    break;
                case PrimitiveTypeCode.IntPtr:
                    wkt = WellKnownType.IntPtr;
                    break;
                case PrimitiveTypeCode.Object:
                    wkt = WellKnownType.Object;
                    break;
                case PrimitiveTypeCode.SByte:
                    wkt = WellKnownType.SByte;
                    break;
                case PrimitiveTypeCode.Single:
                    wkt = WellKnownType.Single;
                    break;
                case PrimitiveTypeCode.String:
                    wkt = WellKnownType.String;
                    break;
                case PrimitiveTypeCode.UInt16:
                    wkt = WellKnownType.UInt16;
                    break;
                case PrimitiveTypeCode.UInt32:
                    wkt = WellKnownType.UInt32;
                    break;
                case PrimitiveTypeCode.UInt64:
                    wkt = WellKnownType.UInt64;
                    break;
                case PrimitiveTypeCode.UIntPtr:
                    wkt = WellKnownType.UIntPtr;
                    break;
                case PrimitiveTypeCode.Void:
                    wkt = WellKnownType.Void;
                    break;
                case PrimitiveTypeCode.TypedReference:
                    wkt = WellKnownType.TypedReference;
                    break;
                default:
                    throw new BadImageFormatException();
            }

            return context.GetWellKnownType(wkt);
        }
    }
}
-1
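The PR description in this record centers on constrained calls to static virtual (static abstract) interface members. As a hedged illustration of the language feature involved, not of the PR's compiler internals, here is a minimal C# 11 sketch; all type and member names below are hypothetical. The constrained access T.Zero in GetZero is the kind of call site that ResolveConstraintMethodApprox, mentioned in the description, must resolve for static virtuals.

// Illustrative sketch only (requires C# 11 / .NET 7 "static abstract"
// interface members). Names are hypothetical.
using System;

interface IAdditiveIdentity<TSelf> where TSelf : IAdditiveIdentity<TSelf>
{
    static abstract TSelf Zero { get; }
}

struct Meters : IAdditiveIdentity<Meters>
{
    public double Value;
    public static Meters Zero => new Meters { Value = 0 };
}

static class Demo
{
    // A constrained call to a static virtual member: the runtime/compiler
    // must resolve T.Zero against the concrete T supplied at the call site.
    public static T GetZero<T>() where T : IAdditiveIdentity<T> => T.Zero;

    public static void Main() => Console.WriteLine(GetZero<Meters>().Value); // 0
}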
dotnet/runtime
66,084
Add support for static virtual methods
Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
MichalStrehovsky
2022-03-02T14:32:50Z
2022-03-03T07:30:19Z
7b83da5eb2bb247e400d2b8f66bea79c41332db8
8dcfacbdec618924f36a95688173a7c1c101592f
Add support for static virtual methods. Took the type system changes from #54063 and cleaned them up, added unit tests. Hooked it up into JitInterface/ResolveConstraintMethodApprox. Using the pre-existing `ConstrainedMethodUseLookupResult` that wasn't currently getting emitted. We'll want to use it for its original purpose at some point, but I think we can make this work for both instance and static constrained calls. Missing things: * Support creating delegates to static virtual methods. This will need a RyuJIT/JitInterface change. * Type loader support. If `MakeGeneric` needs static virtuals at runtime, it will throw. But this is enough to get HttpClient working again. Fixes #65613. Contributes to dotnet/runtimelab#1665.
./src/libraries/System.Drawing.Common/src/System/Drawing/LocalAppContextSwitches.Windows.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Runtime.CompilerServices;

namespace System
{
    internal static partial class LocalAppContextSwitches
    {
        private static int s_dontSupportPngFramesInIcons;
        public static bool DontSupportPngFramesInIcons
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get { return GetCachedSwitchValue(@"Switch.System.Drawing.DontSupportPngFramesInIcons", ref s_dontSupportPngFramesInIcons); }
        }

        private static int s_optimizePrintPreview;
        public static bool OptimizePrintPreview
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get { return GetCachedSwitchValue(@"Switch.System.Drawing.Printing.OptimizePrintPreview", ref s_optimizePrintPreview); }
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Runtime.CompilerServices;

namespace System
{
    internal static partial class LocalAppContextSwitches
    {
        private static int s_dontSupportPngFramesInIcons;
        public static bool DontSupportPngFramesInIcons
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get { return GetCachedSwitchValue(@"Switch.System.Drawing.DontSupportPngFramesInIcons", ref s_dontSupportPngFramesInIcons); }
        }

        private static int s_optimizePrintPreview;
        public static bool OptimizePrintPreview
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get { return GetCachedSwitchValue(@"Switch.System.Drawing.Printing.OptimizePrintPreview", ref s_optimizePrintPreview); }
        }
    }
}
-1
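As a usage note for the LocalAppContextSwitches source above: the values it caches are ordinary AppContext switches, so an application can set them before System.Drawing first reads them. A minimal sketch follows, assuming only the standard AppContext API; the switch names are copied verbatim from the file.

// Illustrative sketch only: sets the compatibility switches read by
// LocalAppContextSwitches before any System.Drawing code observes them.
using System;

class Program
{
    static void Main()
    {
        AppContext.SetSwitch("Switch.System.Drawing.DontSupportPngFramesInIcons", true);
        AppContext.SetSwitch("Switch.System.Drawing.Printing.OptimizePrintPreview", true);

        // Verify the second switch was registered.
        AppContext.TryGetSwitch("Switch.System.Drawing.Printing.OptimizePrintPreview", out bool enabled);
        Console.WriteLine(enabled); // True
    }
}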